--- /dev/null
+From 9dd4b2944c46e1fdbd0a516c221c8a2670cbf005 Mon Sep 17 00:00:00 2001
+From: Rob Herring <rob.herring@calxeda.com>
+Date: Thu, 29 Aug 2013 07:43:52 -0500
+Subject: ARM: xen: only set pm function ptrs for Xen guests
+
+From: Rob Herring <rob.herring@calxeda.com>
+
+commit 9dd4b2944c46e1fdbd0a516c221c8a2670cbf005 upstream.
+
+xen_pm_init was unconditionally setting pm_power_off and arm_pm_restart
+function pointers. This breaks multi-platform kernels. Make this
+conditional on running as a Xen guest and make it a late_initcall to
+ensure it is setup after platform code for Dom0.
+
+Signed-off-by: Rob Herring <rob.herring@calxeda.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/xen/enlighten.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -273,12 +273,15 @@ core_initcall(xen_guest_init);
+
+ static int __init xen_pm_init(void)
+ {
++ if (!xen_domain())
++ return -ENODEV;
++
+ pm_power_off = xen_power_off;
+ arm_pm_restart = xen_restart;
+
+ return 0;
+ }
+-subsys_initcall(xen_pm_init);
++late_initcall(xen_pm_init);
+
+ static irqreturn_t xen_arm_callback(int irq, void *arg)
+ {
--- /dev/null
+From 77dbd7a95e4a4f15264c333a9e9ab97ee27dc2aa Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sun, 8 Sep 2013 14:33:50 +1000
+Subject: crypto: api - Fix race condition in larval lookup
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 77dbd7a95e4a4f15264c333a9e9ab97ee27dc2aa upstream.
+
+crypto_larval_lookup should only return a larval if it created one.
+Any larval created by another entity must be processed through
+crypto_larval_wait before being returned.
+
+Otherwise this will lead to a larval being killed twice, which
+will most likely lead to a crash.
+
+Reported-by: Kees Cook <keescook@chromium.org>
+Tested-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/api.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+ BLOCKING_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
++
+ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
+ {
+ return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
+@@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_
+ }
+ up_write(&crypto_alg_sem);
+
+- if (alg != &larval->alg)
++ if (alg != &larval->alg) {
+ kfree(larval);
++ if (crypto_is_larval(alg))
++ alg = crypto_larval_wait(alg);
++ }
+
+ return alg;
+ }
--- /dev/null
+From 1eeeef153c02f5856ec109fa532eb5f31c39f85c Mon Sep 17 00:00:00 2001
+From: Maxime Bizon <mbizon@freebox.fr>
+Date: Thu, 29 Aug 2013 20:28:13 +0200
+Subject: firmware loader: fix pending_fw_head list corruption
+
+From: Maxime Bizon <mbizon@freebox.fr>
+
+commit 1eeeef153c02f5856ec109fa532eb5f31c39f85c upstream.
+
+Got the following oops just before reboot:
+
+Unable to handle kernel NULL pointer dereference at virtual address 00000000
+[<8028d300>] (__list_del_entry+0x44/0xac)
+[<802e3320>] (__fw_load_abort.part.13+0x1c/0x50)
+[<802e337c>] (fw_shutdown_notify+0x28/0x50)
+[<80034f80>] (notifier_call_chain.isra.1+0x5c/0x9c)
+[<800350ec>] (__blocking_notifier_call_chain+0x44/0x58)
+[<80035114>] (blocking_notifier_call_chain+0x14/0x18)
+[<80035d64>] (kernel_restart_prepare+0x14/0x38)
+[<80035d94>] (kernel_restart+0xc/0x50)
+
+The following race condition triggers here:
+
+ _request_firmware_load()
+ device_create_file(...)
+ kobject_uevent(...)
+ (schedule)
+ (resume)
+ firmware_loading_store(1)
+ firmware_loading_store(0)
+ list_del_init(&buf->pending_list)
+ (schedule)
+ (resume)
+ list_add(&buf->pending_list, &pending_fw_head);
+ wait_for_completion(&buf->completion);
+
+causing an oops later when walking pending_list after the firmware has
+been released.
+
+The proposed fix is to move the list_add() before sysfs attribute
+creation.
+
+Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
+Acked-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/firmware_class.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -868,8 +868,15 @@ static int _request_firmware_load(struct
+ goto err_del_dev;
+ }
+
++ mutex_lock(&fw_lock);
++ list_add(&buf->pending_list, &pending_fw_head);
++ mutex_unlock(&fw_lock);
++
+ retval = device_create_file(f_dev, &dev_attr_loading);
+ if (retval) {
++ mutex_lock(&fw_lock);
++ list_del_init(&buf->pending_list);
++ mutex_unlock(&fw_lock);
+ dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+ goto err_del_bin_attr;
+ }
+@@ -884,10 +891,6 @@ static int _request_firmware_load(struct
+ kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ }
+
+- mutex_lock(&fw_lock);
+- list_add(&buf->pending_list, &pending_fw_head);
+- mutex_unlock(&fw_lock);
+-
+ wait_for_completion(&buf->completion);
+
+ cancel_delayed_work_sync(&fw_priv->timeout_work);
--- /dev/null
+From 363edbe2614aa90df706c0f19ccfa2a6c06af0be Mon Sep 17 00:00:00 2001
+From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+Date: Fri, 6 Sep 2013 00:25:06 +0530
+Subject: powerpc: Default arch idle could cede processor on pseries
+
+From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+
+commit 363edbe2614aa90df706c0f19ccfa2a6c06af0be upstream.
+
+When adding cpuidle support to pSeries, we introduced two
+regressions:
+
+ - The new cpuidle backend driver only works under hypervisors
+   supporting the "SPLPAR" option, which isn't the case of the
+ old POWER4 hypervisor and the HV "light" used on js2x blades
+
+ - The cpuidle driver registers fairly late, meaning that for
+ a significant portion of the boot process, we end up having
+ all threads spinning. This slows down the boot process and
+ increases the overall resource usage if the hypervisor has
+ shared processors.
+
+This fixes both by implementing a "default" idle that will cede
+to the hypervisor when possible, in a very simple way without
+all the bells and whistles of cpuidle.
+
+Reported-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+Acked-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/setup.c | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache
+ }
+ early_initcall(alloc_dispatch_log_kmem_cache);
+
+-static void pSeries_idle(void)
++static void pseries_lpar_idle(void)
+ {
+ /* This would call on the cpuidle framework, and the back-end pseries
+ * driver to go to idle states
+@@ -362,10 +362,22 @@ static void pSeries_idle(void)
+ if (cpuidle_idle_call()) {
+ /* On error, execute default handler
+ * to go into low thread priority and possibly
+- * low power mode.
++ * low power mode by cedeing processor to hypervisor
+ */
+- HMT_low();
+- HMT_very_low();
++
++ /* Indicate to hypervisor that we are idle. */
++ get_lppaca()->idle = 1;
++
++ /*
++ * Yield the processor to the hypervisor. We return if
++ * an external interrupt occurs (which are driven prior
++ * to returning here) or if a prod occurs from another
++ * processor. When returning here, external interrupts
++ * are enabled.
++ */
++ cede_processor();
++
++ get_lppaca()->idle = 0;
+ }
+ }
+
+@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(vo
+
+ pSeries_nvram_init();
+
+- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
++ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ vpa_init(boot_cpuid);
+- ppc_md.power_save = pSeries_idle;
+- }
+-
+- if (firmware_has_feature(FW_FEATURE_LPAR))
++ ppc_md.power_save = pseries_lpar_idle;
+ ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
+- else
++ } else {
++ /* No special idle routine */
+ ppc_md.enable_pmcs = power4_enable_pmcs;
++ }
+
+ ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+
--- /dev/null
+From 230aef7a6a23b6166bd4003bfff5af23c9bd381f Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Wed, 7 Aug 2013 02:01:19 +1000
+Subject: powerpc: Handle unaligned ldbrx/stdbrx
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 230aef7a6a23b6166bd4003bfff5af23c9bd381f upstream.
+
+Normally when we haven't implemented an alignment handler for
+a load or store instruction the process will be terminated.
+
+The alignment handler uses the DSISR (or a pseudo one) to locate
+the right handler. Unfortunately ldbrx and stdbrx overlap lfs and
+stfs so we incorrectly think ldbrx is an lfs and stdbrx is an
+stfs.
+
+This bug is particularly nasty - instead of terminating the
+process we apply an incorrect fixup and continue on.
+
+With more and more overlapping instructions we should stop
+creating a pseudo DSISR and index using the instruction directly,
+but for now add a special case to catch ldbrx/stdbrx.
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
+ nb = aligninfo[instr].len;
+ flags = aligninfo[instr].flags;
+
++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
++ nb = 8;
++ flags = LD+SW;
++ } else if (IS_XFORM(instruction) &&
++ ((instruction >> 1) & 0x3ff) == 660) {
++ nb = 8;
++ flags = ST+SW;
++ }
++
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
--- /dev/null
+From 4784955a5270f30c569fa95899979fd1805caf6c Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 2 Sep 2013 13:08:25 +0200
+Subject: s390/bpf,jit: fix address randomization
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 4784955a5270f30c569fa95899979fd1805caf6c upstream.
+
+Add missing braces to hole calculation. This resulted in an addition
+instead of a subtraction. Which in turn means that the jit compiler
+could try to write out of bounds of the allocated piece of memory.
+
+This bug was introduced with aa2d2c73 "s390/bpf,jit: address randomize
+and write protect jit code".
+
+Fixes this one:
+
+[ 37.320956] Unable to handle kernel pointer dereference at virtual kernel address 000003ff80231000
+[ 37.320984] Oops: 0011 [#1] PREEMPT SMP DEBUG_PAGEALLOC
+[ 37.320993] Modules linked in: dm_multipath scsi_dh eadm_sch dm_mod ctcm fsm autofs4
+[ 37.321007] CPU: 28 PID: 6443 Comm: multipathd Not tainted 3.10.9-61.x.20130829-s390xdefault #1
+[ 37.321011] task: 0000004ada778000 ti: 0000004ae3304000 task.ti: 0000004ae3304000
+[ 37.321014] Krnl PSW : 0704c00180000000 000000000012d1de (bpf_jit_compile+0x198e/0x23d0)
+[ 37.321022] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3
+ Krnl GPRS: 000000004350207d 0000004a00000001 0000000000000007 000003ff80231002
+[ 37.321029] 0000000000000007 000003ff80230ffe 00000000a7740000 000003ff80230f76
+[ 37.321032] 000003ffffffffff 000003ff00000000 000003ff0000007d 000000000071e820
+[ 37.321035] 0000004adbe99950 000000000071ea18 0000004af3d9e7c0 0000004ae3307b80
+[ 37.321046] Krnl Code: 000000000012d1d0: 41305004 la %r3,4(%r5)
+ 000000000012d1d4: e330f0f80021 clg %r3,248(%r15)
+ #000000000012d1da: a7240009 brc 2,12d1ec
+ >000000000012d1de: 50805000 st %r8,0(%r5)
+ 000000000012d1e2: e330f0f00004 lg %r3,240(%r15)
+ 000000000012d1e8: 41303004 la %r3,4(%r3)
+ 000000000012d1ec: e380f0e00004 lg %r8,224(%r15)
+ 000000000012d1f2: e330f0f00024 stg %r3,240(%r15)
+[ 37.321074] Call Trace:
+[ 37.321077] ([<000000000012da78>] bpf_jit_compile+0x2228/0x23d0)
+[ 37.321083] [<00000000006007c2>] sk_attach_filter+0xfe/0x214
+[ 37.321090] [<00000000005d2d92>] sock_setsockopt+0x926/0xbdc
+[ 37.321097] [<00000000005cbfb6>] SyS_setsockopt+0x8a/0xe8
+[ 37.321101] [<00000000005ccaa8>] SyS_socketcall+0x264/0x364
+[ 37.321106] [<0000000000713f1c>] sysc_nr_ok+0x22/0x28
+[ 37.321113] [<000003fffce10ea8>] 0x3fffce10ea8
+[ 37.321118] INFO: lockdep is turned off.
+[ 37.321121] Last Breaking-Event-Address:
+[ 37.321124] [<000000000012d192>] bpf_jit_compile+0x1942/0x23d0
+[ 37.321132]
+[ 37.321135] Kernel panic - not syncing: Fatal exception: panic_on_oops
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/net/bpf_jit_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -805,7 +805,7 @@ static struct bpf_binary_header *bpf_all
+ return NULL;
+ memset(header, 0, sz);
+ header->pages = sz / PAGE_SIZE;
+- hole = sz - bpfsize + sizeof(*header);
++ hole = sz - (bpfsize + sizeof(*header));
+ /* Insert random number of illegal instructions before BPF code
+ * and make sure the first instruction starts at an even address.
+ */
--- /dev/null
+From 984f1733fcee3fbc78d47e26c5096921c5d9946a Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Fri, 6 Sep 2013 11:49:51 -0400
+Subject: SCSI: sd: Fix potential out-of-bounds access
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 984f1733fcee3fbc78d47e26c5096921c5d9946a upstream.
+
+This patch fixes an out-of-bounds error in sd_read_cache_type(), found
+by Google's AddressSanitizer tool. When the loop ends, we know that
+"offset" lies beyond the end of the data in the buffer, so no Caching
+mode page was found. In theory it may be present, but the buffer size
+is limited to 512 bytes.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: James Bottomley <JBottomley@Parallels.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sd.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2419,14 +2419,9 @@ sd_read_cache_type(struct scsi_disk *sdk
+ }
+ }
+
+- if (modepage == 0x3F) {
+- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+- "present\n");
+- goto defaults;
+- } else if ((buffer[offset] & 0x3f) != modepage) {
+- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
+- goto defaults;
+- }
++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
++ goto defaults;
++
+ Page_found:
+ if (modepage == 8) {
+ sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
xtensa-fix-broken-allmodconfig-build.patch
scsi-allow-mpt-fusion-sas-3.0-driver-to-be-built-into-the-kernel.patch
drm-i915-make-user-mode-sync-polarity-setting-explicit.patch
+firmware-loader-fix-pending_fw_head-list-corruption.patch
+zram-fix-invalid-memory-access.patch
+zram-don-t-grab-mutex-in-zram_slot_free_noity.patch
+ubi-fix-peb-leak-in-wear_leveling_worker.patch
+scsi-sd-fix-potential-out-of-bounds-access.patch
+crypto-api-fix-race-condition-in-larval-lookup.patch
+s390-bpf-jit-fix-address-randomization.patch
+powerpc-handle-unaligned-ldbrx-stdbrx.patch
+powerpc-default-arch-idle-could-cede-processor-on-pseries.patch
+xen-gnt-prevent-adding-duplicate-gnt-callbacks.patch
+arm-xen-only-set-pm-function-ptrs-for-xen-guests.patch
--- /dev/null
+From 5ef4414f4bc26a19cfd5cd11aee9697a863e4d51 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 19 Aug 2013 08:48:12 +0200
+Subject: UBI: Fix PEB leak in wear_leveling_worker()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 5ef4414f4bc26a19cfd5cd11aee9697a863e4d51 upstream.
+
+get_peb_for_wl() removes the PEB from the free list.
+If the WL subsystem detects that no wear leveling is needed
+it cancels the operation and drops the gained PEB.
+In this case we have to put the PEB back into the free list.
+
+This issue was introduced with commit ed4b7021c
+(UBI: remove PEB from free tree in get_peb_for_wl()).
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/wl.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct u
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ dbg_wl("no WL needed: min used EC %d, max free EC %d",
+ e1->ec, e2->ec);
++
++ /* Give the unused PEB back */
++ wl_tree_add(e2, &ubi->free);
+ goto out_cancel;
+ }
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
--- /dev/null
+From 5f338d9001094a56cf87bd8a280b4e7ff953bb59 Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Wed, 31 Jul 2013 17:00:42 +0200
+Subject: xen-gnt: prevent adding duplicate gnt callbacks
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 5f338d9001094a56cf87bd8a280b4e7ff953bb59 upstream.
+
+With the current implementation, the callback in the tail of the list
+can be added twice, because the check done in
+gnttab_request_free_callback is bogus, callback->next can be NULL if
+it is the last callback in the list. If we add the same callback twice
+we end up with an infinite loop, where callback == callback->next.
+
+Replace this check with a proper one that iterates over the list to
+see if the callback has already been added.
+
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Matt Wilson <msw@amazon.com>
+Reviewed-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/grant-table.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -730,9 +730,18 @@ void gnttab_request_free_callback(struct
+ void (*fn)(void *), void *arg, u16 count)
+ {
+ unsigned long flags;
++ struct gnttab_free_callback *cb;
++
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+- if (callback->next)
+- goto out;
++
++ /* Check if the callback is already on the list */
++ cb = gnttab_free_callback_list;
++ while (cb) {
++ if (cb == callback)
++ goto out;
++ cb = cb->next;
++ }
++
+ callback->fn = fn;
+ callback->arg = arg;
+ callback->count = count;
--- /dev/null
+From a0c516cbfc7452c8cbd564525fef66d9f20b46d1 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Mon, 12 Aug 2013 15:13:56 +0900
+Subject: zram: don't grab mutex in zram_slot_free_noity
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit a0c516cbfc7452c8cbd564525fef66d9f20b46d1 upstream.
+
+[1] introduced down_write in zram_slot_free_notify to prevent race
+between zram_slot_free_notify and zram_bvec_[read|write]. The race
+could happen if somebody who has right permission to open swap device
+is reading swap device while it is used by swap in parallel.
+
+However, zram_slot_free_notify is called while holding the spin_lock of
+the swap layer, so we shouldn't hold a mutex there. Otherwise, lockdep
+warns about it.
+
+This patch adds new list to handle free slot and workqueue
+so zram_slot_free_notify just registers slot index to be freed and
+registers the request to workqueue. If workqueue is expired,
+it holds mutex_lock so there is no problem any more.
+
+If any I/O is issued, zram handles pending slot-free request
+caused by zram_slot_free_notify right before handling issued
+request because workqueue wouldn't be expired yet so zram I/O
+request handling function can miss it.
+
+Lastly, when zram is reset, flush_work could handle all of pending
+free request so we shouldn't have memory leak.
+
+NOTE: If zram_slot_free_notify's kmalloc with GFP_ATOMIC would be
+failed, the slot will be freed when next write I/O write the slot.
+
+[1] [57ab0485, zram: use zram->lock to protect zram_free_page()
+ in swap free notify path]
+
+* from v2
+ * refactoring
+
+* from v1
+ * totally redesign
+
+Cc: Nitin Gupta <ngupta@vflare.org>
+Cc: Jiang Liu <jiang.liu@huawei.com>
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/zram/zram_drv.c | 60 ++++++++++++++++++++++++++++++++++++++--
+ drivers/staging/zram/zram_drv.h | 10 ++++++
+ 2 files changed, 67 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -445,6 +445,14 @@ static int zram_bvec_write(struct zram *
+ goto out;
+ }
+
++ /*
++ * zram_slot_free_notify could miss free so that let's
++ * double check.
++ */
++ if (unlikely(meta->table[index].handle ||
++ zram_test_flag(meta, index, ZRAM_ZERO)))
++ zram_free_page(zram, index);
++
+ ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
+ meta->compress_workmem);
+
+@@ -504,6 +512,20 @@ out:
+ return ret;
+ }
+
++static void handle_pending_slot_free(struct zram *zram)
++{
++ struct zram_slot_free *free_rq;
++
++ spin_lock(&zram->slot_free_lock);
++ while (zram->slot_free_rq) {
++ free_rq = zram->slot_free_rq;
++ zram->slot_free_rq = free_rq->next;
++ zram_free_page(zram, free_rq->index);
++ kfree(free_rq);
++ }
++ spin_unlock(&zram->slot_free_lock);
++}
++
+ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset, struct bio *bio, int rw)
+ {
+@@ -511,10 +533,12 @@ static int zram_bvec_rw(struct zram *zra
+
+ if (rw == READ) {
+ down_read(&zram->lock);
++ handle_pending_slot_free(zram);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ up_read(&zram->lock);
+ } else {
+ down_write(&zram->lock);
++ handle_pending_slot_free(zram);
+ ret = zram_bvec_write(zram, bvec, index, offset);
+ up_write(&zram->lock);
+ }
+@@ -527,6 +551,8 @@ static void zram_reset_device(struct zra
+ size_t index;
+ struct zram_meta *meta;
+
++ flush_work(&zram->free_work);
++
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
+@@ -721,16 +747,40 @@ error:
+ bio_io_error(bio);
+ }
+
++static void zram_slot_free(struct work_struct *work)
++{
++ struct zram *zram;
++
++ zram = container_of(work, struct zram, free_work);
++ down_write(&zram->lock);
++ handle_pending_slot_free(zram);
++ up_write(&zram->lock);
++}
++
++static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
++{
++ spin_lock(&zram->slot_free_lock);
++ free_rq->next = zram->slot_free_rq;
++ zram->slot_free_rq = free_rq;
++ spin_unlock(&zram->slot_free_lock);
++}
++
+ static void zram_slot_free_notify(struct block_device *bdev,
+ unsigned long index)
+ {
+ struct zram *zram;
++ struct zram_slot_free *free_rq;
+
+ zram = bdev->bd_disk->private_data;
+- down_write(&zram->lock);
+- zram_free_page(zram, index);
+- up_write(&zram->lock);
+ atomic64_inc(&zram->stats.notify_free);
++
++ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
++ if (!free_rq)
++ return;
++
++ free_rq->index = index;
++ add_slot_free(zram, free_rq);
++ schedule_work(&zram->free_work);
+ }
+
+ static const struct block_device_operations zram_devops = {
+@@ -777,6 +827,10 @@ static int create_device(struct zram *zr
+ init_rwsem(&zram->lock);
+ init_rwsem(&zram->init_lock);
+
++ INIT_WORK(&zram->free_work, zram_slot_free);
++ spin_lock_init(&zram->slot_free_lock);
++ zram->slot_free_rq = NULL;
++
+ zram->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -94,11 +94,20 @@ struct zram_meta {
+ struct zs_pool *mem_pool;
+ };
+
++struct zram_slot_free {
++ unsigned long index;
++ struct zram_slot_free *next;
++};
++
+ struct zram {
+ struct zram_meta *meta;
+ struct rw_semaphore lock; /* protect compression buffers, table,
+ * 32bit stat counters against concurrent
+ * notifications, reads and writes */
++
++ struct work_struct free_work; /* handle pending free request */
++ struct zram_slot_free *slot_free_rq; /* list head of free request */
++
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+@@ -109,6 +118,7 @@ struct zram {
+ * we can store in a disk.
+ */
+ u64 disksize; /* bytes */
++ spinlock_t slot_free_lock;
+
+ struct zram_stats stats;
+ };
--- /dev/null
+From 2b86ab9cc29fcd435cde9378c3b9ffe8b5c76128 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Mon, 12 Aug 2013 15:13:55 +0900
+Subject: zram: fix invalid memory access
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit 2b86ab9cc29fcd435cde9378c3b9ffe8b5c76128 upstream.
+
+[1] tried to fix invalid memory access on zram->disk but it didn't
+fix properly because get_disk failed during module exit path.
+
+Actually, we don't need to reset zram->disk's capacity to zero
+in module exit path so that this patch introduces new argument
+"reset_capacity" on zram_reset_device and it only resets it when
+reset_store is called.
+
+[1] 6030ea9b, zram: avoid invalid memory access in zram_exit()
+
+Cc: Nitin Gupta <ngupta@vflare.org>
+Cc: Jiang Liu <jiang.liu@huawei.com>
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/zram/zram_drv.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -522,7 +522,7 @@ static int zram_bvec_rw(struct zram *zra
+ return ret;
+ }
+
+-static void zram_reset_device(struct zram *zram)
++static void zram_reset_device(struct zram *zram, bool reset_capacity)
+ {
+ size_t index;
+ struct zram_meta *meta;
+@@ -551,7 +551,8 @@ static void zram_reset_device(struct zra
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+- set_capacity(zram->disk, 0);
++ if (reset_capacity)
++ set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
+ }
+
+@@ -635,7 +636,7 @@ static ssize_t reset_store(struct device
+ if (bdev)
+ fsync_bdev(bdev);
+
+- zram_reset_device(zram);
++ zram_reset_device(zram, true);
+ return len;
+ }
+
+@@ -902,10 +903,12 @@ static void __exit zram_exit(void)
+ for (i = 0; i < num_devices; i++) {
+ zram = &zram_devices[i];
+
+- get_disk(zram->disk);
+ destroy_device(zram);
+- zram_reset_device(zram);
+- put_disk(zram->disk);
++ /*
++ * Shouldn't access zram->disk after destroy_device
++ * because destroy_device already released zram->disk.
++ */
++ zram_reset_device(zram, false);
+ }
+
+ unregister_blkdev(zram_major, "zram");