--- /dev/null
+From 7f518ad0a212e2a6fd68630e176af1de395070a7 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 12 Aug 2015 15:10:21 +0100
+Subject: dm thin metadata: delete btrees when releasing metadata snapshot
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 7f518ad0a212e2a6fd68630e176af1de395070a7 upstream.
+
+Previously the device details and mapping trees just had their root
+blocks' reference counts decremented. Now dm_btree_del() is called to
+do a deep delete.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin-metadata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struc
+ return r;
+
+ disk_super = dm_block_data(copy);
+- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
++ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
++ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
+ dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+ return dm_tm_unlock(pmd->tm, copy);
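
To see why the deep delete matters, here is a minimal userspace sketch, not
dm-thin's on-disk btree code and with purely illustrative names: decrementing
only the root's reference count leaves everything below it allocated, which is
the metadata space leak the patch above closes, while a deep delete walks the
whole tree.

/* Toy illustration only -- not the dm-thin btree code. */
#include <stdlib.h>

struct node {
        int refcount;
        struct node *left, *right;
};

/* What decrementing only the root amounts to: children are never
 * visited, so their space is leaked. */
void dec_root_only(struct node *root)
{
        if (root && --root->refcount == 0)
                free(root);
}

/* What dm_btree_del() does conceptually: recurse before freeing. */
void deep_delete(struct node *root)
{
        if (!root || --root->refcount != 0)
                return;
        deep_delete(root->left);
        deep_delete(root->right);
        free(root);
}

struct node *leaf(void)
{
        struct node *n = calloc(1, sizeof(*n));        /* error handling omitted */

        n->refcount = 1;
        return n;
}

int main(void)
{
        struct node *root = leaf();

        root->left = leaf();
        root->right = leaf();
        deep_delete(root);        /* releases the root and both children */
        return 0;
}
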
--- /dev/null
+From e037239e5e7b61007763984aa35a8329596d8c88 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 10 Aug 2015 15:28:49 -0400
+Subject: drm/radeon: add new OLAND pci id
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit e037239e5e7b61007763984aa35a8329596d8c88 upstream.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/drm/drm_pciids.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -172,6 +172,7 @@
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
--- /dev/null
+From 3e04e2fe6d87807d27521ad6ebb9e7919d628f25 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Tue, 11 Aug 2015 22:31:17 -0700
+Subject: drm/vmwgfx: Fix execbuf locking issues
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 3e04e2fe6d87807d27521ad6ebb9e7919d628f25 upstream.
+
+This addresses two issues that cause problems with viewperf maya-03 in
+situation with memory pressure.
+
+The first issue causes attempts to unreserve buffers when batched
+reservation fails due to, for example, a pending signal. While the
+ttm_eu api was previously resilient against this type of error, it no
+longer is, and the lockdep code will complain about attempting to
+unreserve buffers that are not reserved. The issue is resolved by not
+calling ttm_eu_backoff_reservation in the buffer reserve error path.
+
+The second issue is that the binding_mutex may be held while user-space
+fence objects are created. Since fence creation can allocate memory, the
+mutex may then be held during memory reclaim, which can cause recursive
+attempts to grab the binding mutex. The issue is resolved by not holding
+the binding mutex across fence creation and submission.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2475,7 +2475,7 @@ int vmw_execbuf_process(struct drm_file
+
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+- goto out_err;
++ goto out_err_nores;
+
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+@@ -2511,6 +2511,7 @@ int vmw_execbuf_process(struct drm_file
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
+ vmw_fifo_commit(dev_priv, command_size);
++ mutex_unlock(&dev_priv->binding_mutex);
+
+ vmw_query_bo_switch_commit(dev_priv, sw_context);
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+@@ -2526,7 +2527,6 @@ int vmw_execbuf_process(struct drm_file
+ DRM_ERROR("Fence submission error. Syncing.\n");
+
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
+- mutex_unlock(&dev_priv->binding_mutex);
+
+ ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
+ (void *) fence);
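
The second fix follows a general pattern worth spelling out: finish the work
that needs the lock, drop the lock, and only then call into the
allocation-prone path that could re-enter it via reclaim. A rough
pthread-based analogy, with invented names and not the vmwgfx code:

/* Analogy only: "do the locked work, drop the lock, then do the
 * allocation-prone work" -- the shape of the vmwgfx fix above. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t binding_lock = PTHREAD_MUTEX_INITIALIZER;

static void commit_commands(void)
{
        /* work that genuinely needs binding_lock held */
}

static int create_fence(void)
{
        /* May allocate.  In the kernel, allocation can enter reclaim, and a
         * reclaim path that also takes binding_lock would recurse on it if
         * the caller still held the lock here. */
        void *fence = malloc(64);

        if (!fence)
                return -1;
        free(fence);
        return 0;
}

int submit(void)
{
        pthread_mutex_lock(&binding_lock);
        commit_commands();
        pthread_mutex_unlock(&binding_lock);        /* drop it before fencing */

        return create_fence();                      /* safe: lock no longer held */
}

int main(void)
{
        return submit() ? 1 : 0;
}
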
--- /dev/null
+From 5c16179b550b9fd8114637a56b153c9768ea06a5 Mon Sep 17 00:00:00 2001
+From: Michael Walle <michael@walle.cc>
+Date: Tue, 21 Jul 2015 11:00:53 +0200
+Subject: EDAC, ppc4xx: Access mci->csrows array elements properly
+
+From: Michael Walle <michael@walle.cc>
+
+commit 5c16179b550b9fd8114637a56b153c9768ea06a5 upstream.
+
+The commit
+
+ de3910eb79ac ("edac: change the mem allocation scheme to
+ make Documentation/kobject.txt happy")
+
+changed the memory allocation for the csrows member. But ppc4xx_edac was
+forgotten in the patch. Fix it.
+
+Signed-off-by: Michael Walle <michael@walle.cc>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Link: http://lkml.kernel.org/r/1437469253-8611-1-git-send-email-michael@walle.cc
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/edac/ppc4xx_edac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/edac/ppc4xx_edac.c
++++ b/drivers/edac/ppc4xx_edac.c
+@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struc
+ */
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+- struct csrow_info *csi = &mci->csrows[row];
++ struct csrow_info *csi = mci->csrows[row];
+
+ /*
+ * Get the configuration settings for this
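
The reason a one-line change is enough: de3910eb79ac turned mci->csrows from a
contiguous array of csrow_info structures into an array of pointers to
individually allocated csrow_info structures, so &mci->csrows[row] no longer
yields a struct csrow_info *. A stripped-down sketch of the type change, with
illustrative struct layouts rather than the real EDAC definitions:

/* Illustrative layouts only -- not the real struct mem_ctl_info. */
struct csrow_info { int nr_channels; };

struct mci_old {
        struct csrow_info *csrows;        /* contiguous array of structs  */
};

struct mci_new {
        struct csrow_info **csrows;       /* array of pointers to structs */
};

void access_row(struct mci_old *o, struct mci_new *n, int row)
{
        struct csrow_info *a = &o->csrows[row];  /* correct for the old layout */
        struct csrow_info *b = n->csrows[row];   /* correct for the new layout */

        /* With the new layout, &n->csrows[row] has type struct csrow_info **;
         * assigning it to a struct csrow_info * is only a warning in C, which
         * is why the stale code in ppc4xx_edac still compiled while pointing
         * at the wrong object. */
        (void)a;
        (void)b;
}
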
--- /dev/null
+From c0ddc8c745b7f89c50385fd7aa03c78dc543fa7a Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 27 Jul 2015 00:06:55 +0200
+Subject: localmodconfig: Use Kbuild files too
+
+From: Richard Weinberger <richard@nod.at>
+
+commit c0ddc8c745b7f89c50385fd7aa03c78dc543fa7a upstream.
+
+In kbuild it is allowed to define objects in files named "Makefile"
+and "Kbuild".
+Currently localmodconfig reads objects only from files named "Makefile"
+and therefore misses modules like nouveau.
+
+Link: http://lkml.kernel.org/r/1437948415-16290-1-git-send-email-richard@nod.at
+
+Reported-and-tested-by: Leonidas Spyropoulos <artafinde@gmail.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/kconfig/streamline_config.pl | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'
+ my $kconfig = $ARGV[1];
+ my $lsmod_file = $ENV{'LSMOD'};
+
+-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
+ chomp @makefiles;
+
+ my %depends;
--- /dev/null
+From fed66e2cdd4f127a43fd11b8d92a99bdd429528c Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 10:32:01 +0200
+Subject: perf: Fix fasync handling on inherited events
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit fed66e2cdd4f127a43fd11b8d92a99bdd429528c upstream.
+
+Vince reported that the fasync signal stuff doesn't work properly for
+inherited events. So fix that.
+
+Installing fasync allocates memory and sets filp->f_flags |= FASYNC,
+which upon the demise of the file descriptor ensures the allocation is
+freed and state is updated.
+
+Now for perf, we can have the events stick around for a while after the
+original FD is dead because of references from child events. So we
+cannot copy the fasync pointer around. We can however consistently use
+the parent's fasync, as that will be updated.
+
+Reported-and-Tested-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: eranian@google.com
+Link: http://lkml.kernel.org/r/1434011521.1495.71.camel@twins
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4218,12 +4218,20 @@ static const struct file_operations perf
+ * to user-space before waking everybody up.
+ */
+
++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
++{
++ /* only the parent has fasync state */
++ if (event->parent)
++ event = event->parent;
++ return &event->fasync;
++}
++
+ void perf_event_wakeup(struct perf_event *event)
+ {
+ ring_buffer_wakeup(event);
+
+ if (event->pending_kill) {
+- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
++ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
+ event->pending_kill = 0;
+ }
+ }
+@@ -5432,7 +5440,7 @@ static int __perf_event_overflow(struct
+ else
+ perf_event_output(event, data, regs);
+
+- if (event->fasync && event->pending_kill) {
++ if (*perf_event_fasync(event) && event->pending_kill) {
+ event->pending_wakeup = 1;
+ irq_work_queue(&event->pending);
+ }
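
The point about not copying the fasync pointer can be seen in miniature: a
copied pointer goes stale once its owner tears the state down, whereas always
resolving through the owner, the way perf_event_fasync() resolves to the
parent event, sees the current value. The sketch below is a userspace analogy
with invented names, not the perf code:

/* Analogy only: why children should resolve notification state through
 * their parent instead of keeping a private copy of the pointer. */
#include <stdlib.h>

struct owner {
        void *fasync_state;        /* freed and cleared when the owner's file dies */
};

struct child {
        struct owner *parent;
        void *copied_state;        /* BAD: stale once the owner frees its state */
};

void **child_fasync(struct child *c)
{
        return &c->parent->fasync_state;   /* GOOD: always the current value */
}

void owner_file_release(struct owner *o)
{
        free(o->fasync_state);
        o->fasync_state = NULL;            /* readers via child_fasync() see NULL */
}

int main(void)
{
        struct owner o = { .fasync_state = malloc(16) };
        struct child c = { .parent = &o, .copied_state = o.fasync_state };

        owner_file_release(&o);
        /* c.copied_state now dangles; *child_fasync(&c) is NULL and safe to test */
        return *child_fasync(&c) == NULL ? 0 : 1;
}
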
--- /dev/null
+From c7999c6f3fed9e383d3131474588f282ae6d56b9 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 4 Aug 2015 19:22:49 +0200
+Subject: perf: Fix PERF_EVENT_IOC_PERIOD migration race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit c7999c6f3fed9e383d3131474588f282ae6d56b9 upstream.
+
+I ran the perf fuzzer, which triggered some WARN()s that are due to
+trying to stop/restart an event on the wrong CPU.
+
+Use the normal IPI pattern to ensure we run the code on the correct CPU.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: bad7192b842c ("perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 75 +++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 55 insertions(+), 20 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3562,28 +3562,21 @@ static void perf_event_for_each(struct p
+ mutex_unlock(&ctx->mutex);
+ }
+
+-static int perf_event_period(struct perf_event *event, u64 __user *arg)
+-{
+- struct perf_event_context *ctx = event->ctx;
+- int ret = 0, active;
++struct period_event {
++ struct perf_event *event;
+ u64 value;
++};
+
+- if (!is_sampling_event(event))
+- return -EINVAL;
+-
+- if (copy_from_user(&value, arg, sizeof(value)))
+- return -EFAULT;
+-
+- if (!value)
+- return -EINVAL;
++static int __perf_event_period(void *info)
++{
++ struct period_event *pe = info;
++ struct perf_event *event = pe->event;
++ struct perf_event_context *ctx = event->ctx;
++ u64 value = pe->value;
++ bool active;
+
+- raw_spin_lock_irq(&ctx->lock);
++ raw_spin_lock(&ctx->lock);
+ if (event->attr.freq) {
+- if (value > sysctl_perf_event_sample_rate) {
+- ret = -EINVAL;
+- goto unlock;
+- }
+-
+ event->attr.sample_freq = value;
+ } else {
+ event->attr.sample_period = value;
+@@ -3602,11 +3595,53 @@ static int perf_event_period(struct perf
+ event->pmu->start(event, PERF_EF_RELOAD);
+ perf_pmu_enable(ctx->pmu);
+ }
++ raw_spin_unlock(&ctx->lock);
++
++ return 0;
++}
++
++static int perf_event_period(struct perf_event *event, u64 __user *arg)
++{
++ struct period_event pe = { .event = event, };
++ struct perf_event_context *ctx = event->ctx;
++ struct task_struct *task;
++ u64 value;
++
++ if (!is_sampling_event(event))
++ return -EINVAL;
++
++ if (copy_from_user(&value, arg, sizeof(value)))
++ return -EFAULT;
++
++ if (!value)
++ return -EINVAL;
++
++ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
++ return -EINVAL;
++
++ task = ctx->task;
++ pe.value = value;
++
++ if (!task) {
++ cpu_function_call(event->cpu, __perf_event_period, &pe);
++ return 0;
++ }
++
++retry:
++ if (!task_function_call(task, __perf_event_period, &pe))
++ return 0;
++
++ raw_spin_lock_irq(&ctx->lock);
++ if (ctx->is_active) {
++ raw_spin_unlock_irq(&ctx->lock);
++ task = ctx->task;
++ goto retry;
++ }
+
+-unlock:
++ __perf_event_period(&pe);
+ raw_spin_unlock_irq(&ctx->lock);
+
+- return ret;
++ return 0;
+ }
+
+ static const struct file_operations perf_fops;
ipc-sem.c-update-correct-memory-barriers.patch
mm-hwpoison-fix-page-refcount-of-unknown-non-lru-page.patch
xen-blkfront-don-t-add-indirect-pages-to-list-when.patch
+perf-fix-fasync-handling-on-inherited-events.patch
+perf-fix-perf_event_ioc_period-migration-race.patch
+dm-thin-metadata-delete-btrees-when-releasing-metadata-snapshot.patch
+localmodconfig-use-kbuild-files-too.patch
+edac-ppc4xx-access-mci-csrows-array-elements-properly.patch
+drm-radeon-add-new-oland-pci-id.patch
+drm-vmwgfx-fix-execbuf-locking-issues.patch