--- /dev/null
+From 42bec214d8bd432be6d32a1acb0a9079ecd4d142 Mon Sep 17 00:00:00 2001
+From: Sachin Prabhu <sprabhu@redhat.com>
+Date: Thu, 3 Aug 2017 13:09:03 +0530
+Subject: cifs: Fix df output for users with quota limits
+
+From: Sachin Prabhu <sprabhu@redhat.com>
+
+commit 42bec214d8bd432be6d32a1acb0a9079ecd4d142 upstream.
+
+The df for a SMB2 share triggers a GetInfo call for
+FS_FULL_SIZE_INFORMATION. The values returned are used to populate
+struct statfs.
+
+The problem is that none of the information returned by the call
+contains the total blocks available on the filesystem. Instead we use
+the blocks available to the user ie. quota limitation when filling out
+statfs.f_blocks. The information returned does contain Actual free units
+on the filesystem and is used to populate statfs.f_bfree. For users with
+quota enabled, it can lead to situations where the total free space
+reported is more than the total blocks on the system ending up with df
+reports like the following
+
+ # df -h /mnt/a
+Filesystem Size Used Avail Use% Mounted on
+//192.168.22.10/a 2.5G -2.3G 2.5G - /mnt/a
+
+To fix this problem, we instead populate both statfs.f_bfree with the
+same value as statfs.f_bavail ie. CallerAvailableAllocationUnits. This
+is similar to what is done already in the code for cifs and df now
+reports the quota information for the user used to mount the share.
+
+ # df --si /mnt/a
+Filesystem Size Used Avail Use% Mounted on
+//192.168.22.10/a 2.7G 101M 2.6G 4% /mnt/a
+
+Signed-off-by: Sachin Prabhu <sprabhu@redhat.com>
+Signed-off-by: Pierguido Lambri <plambri@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2551,8 +2551,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_f
+ kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+ le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+ kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++ kst->f_bfree = kst->f_bavail =
++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ return;
+ }
+
--- /dev/null
+From d3edede29f74d335f81d95a4588f5f136a9f7dcf Mon Sep 17 00:00:00 2001
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+Date: Wed, 23 Aug 2017 14:48:14 +1000
+Subject: cifs: return ENAMETOOLONG for overlong names in cifs_open()/cifs_lookup()
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+commit d3edede29f74d335f81d95a4588f5f136a9f7dcf upstream.
+
+Add checking for the path component length and verify it is <= the maximum
+that the server advertises via FileFsAttributeInformation.
+
+With this patch cifs.ko will now return ENAMETOOLONG instead of ENOENT
+when users try to access an overlong path.
+
+To test this, try to cd into a (non-existing) directory on a CIFS share
+that has a too long name:
+cd /mnt/aaaaaaaaaaaaaaa...
+
+and it now should show a good error message from the shell:
+bash: cd: /mnt/aaaaaaaaaaaaaaaa...aaaaaa: File name too long
+
+rh bz 1153996
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/dir.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -183,15 +183,20 @@ cifs_bp_rename_retry:
+ }
+
+ /*
++ * Don't allow path components longer than the server max.
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+ */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
++ if (unlikely(direntry->d_name.len >
++ tcon->fsAttrInfo.MaxPathNameComponentLength))
++ return -ENAMETOOLONG;
++
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ for (i = 0; i < direntry->d_name.len; i++) {
+ if (direntry->d_name.name[i] == '\\') {
+@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, st
+ return finish_no_open(file, res);
+ }
+
+- rc = check_name(direntry);
+- if (rc)
+- return rc;
+-
+ xid = get_xid();
+
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, st
+ }
+
+ tcon = tlink_tcon(tlink);
++
++ rc = check_name(direntry, tcon);
++ if (rc)
++ goto out_free_xid;
++
+ server = tcon->ses->server;
+
+ if (server->ops->new_lease_key)
+@@ -766,7 +772,7 @@ cifs_lookup(struct inode *parent_dir_ino
+ }
+ pTcon = tlink_tcon(tlink);
+
+- rc = check_name(direntry);
++ rc = check_name(direntry, pTcon);
+ if (rc)
+ goto lookup_out;
+
--- /dev/null
+From fe4600a548f2763dec91b3b27a1245c370ceee2a Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sat, 19 Aug 2017 13:05:58 +0100
+Subject: drm: Release driver tracking before making the object available again
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit fe4600a548f2763dec91b3b27a1245c370ceee2a upstream.
+
+This is the same bug as we fixed in commit f6cd7daecff5 ("drm: Release
+driver references to handle before making it available again"), but now
+the exposure is via the PRIME lookup tables. If we remove the
+object/handle from the PRIME lut, then a new request for the same
+object/fd will generate a new handle, thus for a short window that
+object is known to userspace by two different handles. Fix this by
+releasing the driver tracking before PRIME.
+
+Fixes: 0ff926c7d4f0 ("drm/prime: add exported buffers to current fprivs
+imported buffer list (v2)")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Rob Clark <robdclark@gmail.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Thierry Reding <treding@nvidia.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20170819120558.6465-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_gem.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -711,13 +711,13 @@ drm_gem_object_release_handle(int id, vo
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
++ if (dev->driver->gem_close_object)
++ dev->driver->gem_close_object(obj, file_priv);
++
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+- if (dev->driver->gem_close_object)
+- dev->driver->gem_close_object(obj, file_priv);
+-
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
--- /dev/null
+From fc788f64f1f3eb31e87d4f53bcf1ab76590d5838 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Fri, 18 Aug 2017 11:12:19 -0400
+Subject: nfsd: Limit end of page list when decoding NFSv4 WRITE
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit fc788f64f1f3eb31e87d4f53bcf1ab76590d5838 upstream.
+
+When processing an NFSv4 WRITE operation, argp->end should never
+point past the end of the data in the final page of the page list.
+Otherwise, nfsd4_decode_compound can walk into uninitialized memory.
+
+More critical, nfsd4_decode_write is failing to increment argp->pagelen
+when it increments argp->pagelist. This can cause later xdr decoders
+to assume more data is available than really is, which can cause server
+crashes on malformed requests.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4xdr.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -127,7 +127,7 @@ static void next_decode_page(struct nfsd
+ argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+- argp->end = argp->p + (argp->pagelen>>2);
++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
+ argp->pagelen = 0;
+ } else {
+ argp->end = argp->p + (PAGE_SIZE>>2);
+@@ -1235,9 +1235,7 @@ nfsd4_decode_write(struct nfsd4_compound
+ argp->pagelen -= pages * PAGE_SIZE;
+ len -= pages * PAGE_SIZE;
+
+- argp->p = (__be32 *)page_address(argp->pagelist[0]);
+- argp->pagelist++;
+- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
++ next_decode_page(argp);
+ }
+ argp->p += XDR_QUADLEN(len);
+
--- /dev/null
+From 64aee2a965cf2954a038b5522f11d2cd2f0f8f3e Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 22 Jun 2017 15:41:38 +0100
+Subject: perf/core: Fix group {cpu,task} validation
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 64aee2a965cf2954a038b5522f11d2cd2f0f8f3e upstream.
+
+Regardless of which events form a group, it does not make sense for the
+events to target different tasks and/or CPUs, as this leaves the group
+inconsistent and impossible to schedule. The core perf code assumes that
+these are consistent across (successfully initialised) groups.
+
+Core perf code only verifies this when moving SW events into a HW
+context. Thus, we can violate this requirement for pure SW groups and
+pure HW groups, unless the relevant PMU driver happens to perform this
+verification itself. These mismatched groups subsequently wreak havoc
+elsewhere.
+
+For example, we handle watchpoints as SW events, and reserve watchpoint
+HW on a per-CPU basis at pmu::event_init() time to ensure that any event
+that is initialised is guaranteed to have a slot at pmu::add() time.
+However, the core code only checks the group leader's cpu filter (via
+event_filter_match()), and can thus install follower events onto CPUs
+violating their (mismatched) CPU filters, potentially installing them
+into a CPU without sufficient reserved slots.
+
+This can be triggered with the below test case, resulting in warnings
+from arch backends.
+
+ #define _GNU_SOURCE
+ #include <linux/hw_breakpoint.h>
+ #include <linux/perf_event.h>
+ #include <sched.h>
+ #include <stdio.h>
+ #include <sys/prctl.h>
+ #include <sys/syscall.h>
+ #include <unistd.h>
+
+ static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
+ int group_fd, unsigned long flags)
+ {
+ return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+ }
+
+ char watched_char;
+
+ struct perf_event_attr wp_attr = {
+ .type = PERF_TYPE_BREAKPOINT,
+ .bp_type = HW_BREAKPOINT_RW,
+ .bp_addr = (unsigned long)&watched_char,
+ .bp_len = 1,
+ .size = sizeof(wp_attr),
+ };
+
+ int main(int argc, char *argv[])
+ {
+ int leader, ret;
+ cpu_set_t cpus;
+
+ /*
+ * Force use of CPU0 to ensure our CPU0-bound events get scheduled.
+ */
+ CPU_ZERO(&cpus);
+ CPU_SET(0, &cpus);
+ ret = sched_setaffinity(0, sizeof(cpus), &cpus);
+ if (ret) {
+ printf("Unable to set cpu affinity\n");
+ return 1;
+ }
+
+ /* open leader event, bound to this task, CPU0 only */
+ leader = perf_event_open(&wp_attr, 0, 0, -1, 0);
+ if (leader < 0) {
+ printf("Couldn't open leader: %d\n", leader);
+ return 1;
+ }
+
+ /*
+ * Open a follower event that is bound to the same task, but a
+ * different CPU. This means that the group should never be possible to
+ * schedule.
+ */
+ ret = perf_event_open(&wp_attr, 0, 1, leader, 0);
+ if (ret < 0) {
+ printf("Couldn't open mismatched follower: %d\n", ret);
+ return 1;
+ } else {
+		printf("Opened leader/follower with mismatched CPUs\n");
+ }
+
+ /*
+ * Open as many independent events as we can, all bound to the same
+ * task, CPU0 only.
+ */
+ do {
+ ret = perf_event_open(&wp_attr, 0, 0, -1, 0);
+ } while (ret >= 0);
+
+ /*
+	 * Force enable/disable all events to trigger the erroneous
+ * installation of the follower event.
+ */
+ printf("Opened all events. Toggling..\n");
+ for (;;) {
+ prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
+ prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
+ }
+
+ return 0;
+ }
+
+Fix this by validating this requirement regardless of whether we're
+moving events.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Zhou Chengming <zhouchengming1@huawei.com>
+Link: http://lkml.kernel.org/r/1498142498-15758-1-git-send-email-mark.rutland@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 39 +++++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7583,28 +7583,27 @@ SYSCALL_DEFINE5(perf_event_open,
+ if (group_leader->group_leader != group_leader)
+ goto err_context;
+ /*
+- * Do not allow to attach to a group in a different
+- * task or CPU context:
++ * Make sure we're both events for the same CPU;
++ * grouping events for different CPUs is broken; since
++ * you can never concurrently schedule them anyhow.
+ */
+- if (move_group) {
+- /*
+- * Make sure we're both on the same task, or both
+- * per-cpu events.
+- */
+- if (group_leader->ctx->task != ctx->task)
+- goto err_context;
++ if (group_leader->cpu != event->cpu)
++ goto err_context;
++
++ /*
++ * Make sure we're both on the same task, or both
++ * per-CPU events.
++ */
++ if (group_leader->ctx->task != ctx->task)
++ goto err_context;
+
+- /*
+- * Make sure we're both events for the same CPU;
+- * grouping events for different CPUs is broken; since
+- * you can never concurrently schedule them anyhow.
+- */
+- if (group_leader->cpu != event->cpu)
+- goto err_context;
+- } else {
+- if (group_leader->ctx != ctx)
+- goto err_context;
+- }
++ /*
++ * Do not allow to attach to a group in a different task
++ * or CPU context. If we're moving SW events, we'll fix
++ * this up later, so allow that.
++ */
++ if (!move_group && group_leader->ctx != ctx)
++ goto err_context;
+
+ /*
+ * Only a group leader can be exclusive or pinned
input-trackpoint-add-new-trackpoint-firmware-id.patch
alsa-core-fix-unexpected-error-at-replacing-user-tlv.patch
alsa-hda-add-stereo-mic-quirk-for-lenovo-g50-70-17aa-3978.patch
+drm-release-driver-tracking-before-making-the-object-available-again.patch
+tracing-fix-freeing-of-filter-in-create_filter-when-set_str-is-false.patch
+cifs-fix-df-output-for-users-with-quota-limits.patch
+cifs-return-enametoolong-for-overlong-names-in-cifs_open-cifs_lookup.patch
+nfsd-limit-end-of-page-list-when-decoding-nfsv4-write.patch
+perf-core-fix-group-cpu-task-validation.patch
--- /dev/null
+From 8b0db1a5bdfcee0dbfa89607672598ae203c9045 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Wed, 23 Aug 2017 12:46:27 -0400
+Subject: tracing: Fix freeing of filter in create_filter() when set_str is false
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 8b0db1a5bdfcee0dbfa89607672598ae203c9045 upstream.
+
+Performing the following task with kmemleak enabled:
+
+ # cd /sys/kernel/tracing/events/irq/irq_handler_entry/
+ # echo 'enable_event:kmem:kmalloc:3 if irq >' > trigger
+ # echo 'enable_event:kmem:kmalloc:3 if irq > 31' > trigger
+ # echo scan > /sys/kernel/debug/kmemleak
+ # cat /sys/kernel/debug/kmemleak
+unreferenced object 0xffff8800b9290308 (size 32):
+ comm "bash", pid 1114, jiffies 4294848451 (age 141.139s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffff81cef5aa>] kmemleak_alloc+0x4a/0xa0
+ [<ffffffff81357938>] kmem_cache_alloc_trace+0x158/0x290
+ [<ffffffff81261c09>] create_filter_start.constprop.28+0x99/0x940
+ [<ffffffff812639c9>] create_filter+0xa9/0x160
+ [<ffffffff81263bdc>] create_event_filter+0xc/0x10
+ [<ffffffff812655e5>] set_trigger_filter+0xe5/0x210
+ [<ffffffff812660c4>] event_enable_trigger_func+0x324/0x490
+ [<ffffffff812652e2>] event_trigger_write+0x1a2/0x260
+ [<ffffffff8138cf87>] __vfs_write+0xd7/0x380
+ [<ffffffff8138f421>] vfs_write+0x101/0x260
+ [<ffffffff8139187b>] SyS_write+0xab/0x130
+ [<ffffffff81cfd501>] entry_SYSCALL_64_fastpath+0x1f/0xbe
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The function create_filter() is passed a 'filterp' pointer that gets
+allocated, and if "set_str" is true, it is up to the caller to free it, even
+on error. The problem is that the pointer is not freed by create_filter()
+when set_str is false. This is a bug, and it is not up to the caller to free
+the filter on error if it doesn't care about the string.
+
+Link: http://lkml.kernel.org/r/1502705898-27571-2-git-send-email-chuhu@redhat.com
+
+Fixes: 38b78eb85 ("tracing: Factorize filter creation")
+Reported-by: Chunyu Hu <chuhu@redhat.com>
+Tested-by: Chunyu Hu <chuhu@redhat.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1909,6 +1909,10 @@ static int create_filter(struct ftrace_e
+ if (err && set_str)
+ append_filter_err(ps, filter);
+ }
++ if (err && !set_str) {
++ free_event_filter(filter);
++ filter = NULL;
++ }
+ create_filter_finish(ps);
+
+ *filterp = filter;