git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 13 Jan 2015 21:48:58 +0000 (13:48 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 13 Jan 2015 21:48:58 +0000 (13:48 -0800)
added patches:
brcmfmac-fix-ifidx-for-rx-data-by-msgbuf.patch
iommu-vt-d-fix-an-off-by-one-bug-in-__domain_mapping.patch
iommu-vt-d-fix-dmar_domain-leak-in-iommu_attach_device.patch
pstore-ram-allow-optional-mapping-with-pgprot_noncached.patch
pstore-ram-fix-hangs-by-using-write-combine-mappings.patch
ubi-fix-double-free-after-do_sync_erase.patch
ubi-fix-invalid-vfree.patch

queue-3.18/brcmfmac-fix-ifidx-for-rx-data-by-msgbuf.patch [new file with mode: 0644]
queue-3.18/iommu-vt-d-fix-an-off-by-one-bug-in-__domain_mapping.patch [new file with mode: 0644]
queue-3.18/iommu-vt-d-fix-dmar_domain-leak-in-iommu_attach_device.patch [new file with mode: 0644]
queue-3.18/pstore-ram-allow-optional-mapping-with-pgprot_noncached.patch [new file with mode: 0644]
queue-3.18/pstore-ram-fix-hangs-by-using-write-combine-mappings.patch [new file with mode: 0644]
queue-3.18/series
queue-3.18/ubi-fix-double-free-after-do_sync_erase.patch [new file with mode: 0644]
queue-3.18/ubi-fix-invalid-vfree.patch [new file with mode: 0644]

diff --git a/queue-3.18/brcmfmac-fix-ifidx-for-rx-data-by-msgbuf.patch b/queue-3.18/brcmfmac-fix-ifidx-for-rx-data-by-msgbuf.patch
new file mode 100644 (file)
index 0000000..a5cafb1
--- /dev/null
@@ -0,0 +1,43 @@
+From 94a612086f5e78272e831c04b673778f8546ea73 Mon Sep 17 00:00:00 2001
+From: Hante Meuleman <meuleman@broadcom.com>
+Date: Wed, 3 Dec 2014 21:05:28 +0100
+Subject: brcmfmac: Fix ifidx for rx data by msgbuf.
+
+From: Hante Meuleman <meuleman@broadcom.com>
+
+commit 94a612086f5e78272e831c04b673778f8546ea73 upstream.
+
+The ifidx provided by FW needs to be offsetted when receiving data
+packets.
+
+Reviewed-by: Arend Van Spriel <arend@broadcom.com>
+Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
+Signed-off-by: Hante Meuleman <meuleman@broadcom.com>
+Signed-off-by: Arend van Spriel <arend@broadcom.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+@@ -1081,8 +1081,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf
+ {
+       struct brcmf_if *ifp;
++      /* The ifidx is the idx to map to matching netdev/ifp. When receiving
++       * events this is easy because it contains the bssidx which maps
++       * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
++       * bssidx 1 is used for p2p0 and no data can be received or
++       * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
++       */
++      if (ifidx)
++              (ifidx)++;
+       ifp = msgbuf->drvr->iflist[ifidx];
+       if (!ifp || !ifp->ndev) {
++              brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
+               brcmu_pkt_buf_free_skb(skb);
+               return;
+       }
diff --git a/queue-3.18/iommu-vt-d-fix-an-off-by-one-bug-in-__domain_mapping.patch b/queue-3.18/iommu-vt-d-fix-an-off-by-one-bug-in-__domain_mapping.patch
new file mode 100644 (file)
index 0000000..0b12f34
--- /dev/null
@@ -0,0 +1,54 @@
+From cc4f14aa170d895c9a43bdb56f62070c8a6da908 Mon Sep 17 00:00:00 2001
+From: Jiang Liu <jiang.liu@linux.intel.com>
+Date: Wed, 26 Nov 2014 09:42:10 +0800
+Subject: iommu/vt-d: Fix an off-by-one bug in __domain_mapping()
+
+From: Jiang Liu <jiang.liu@linux.intel.com>
+
+commit cc4f14aa170d895c9a43bdb56f62070c8a6da908 upstream.
+
+There's an off-by-one bug in function __domain_mapping(), which may
+trigger the BUG_ON(nr_pages < lvl_pages) when
+       (nr_pages + 1) & superpage_mask == 0
+
+The issue was introduced by commit 9051aa0268dc "intel-iommu: Combine
+domain_pfn_mapping() and domain_sg_mapping()", which sets sg_res to
+"nr_pages + 1" to avoid some of the 'sg_res==0' code paths.
+
+It's safe to remove extra "+1" because sg_res is only used to calculate
+page size now.
+
+Reported-And-Tested-by: Sudeep Dutt <sudeep.dutt@intel.com>
+Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
+Acked-By: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c |    8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1983,7 +1983,7 @@ static int __domain_mapping(struct dmar_
+ {
+       struct dma_pte *first_pte = NULL, *pte = NULL;
+       phys_addr_t uninitialized_var(pteval);
+-      unsigned long sg_res;
++      unsigned long sg_res = 0;
+       unsigned int largepage_lvl = 0;
+       unsigned long lvl_pages = 0;
+@@ -1994,10 +1994,8 @@ static int __domain_mapping(struct dmar_
+       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+-      if (sg)
+-              sg_res = 0;
+-      else {
+-              sg_res = nr_pages + 1;
++      if (!sg) {
++              sg_res = nr_pages;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+       }
diff --git a/queue-3.18/iommu-vt-d-fix-dmar_domain-leak-in-iommu_attach_device.patch b/queue-3.18/iommu-vt-d-fix-dmar_domain-leak-in-iommu_attach_device.patch
new file mode 100644 (file)
index 0000000..3707ba1
--- /dev/null
@@ -0,0 +1,41 @@
+From 62c22167dd70b730f61c2b88f950e98154a87980 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 9 Dec 2014 12:56:45 +0100
+Subject: iommu/vt-d: Fix dmar_domain leak in iommu_attach_device
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 62c22167dd70b730f61c2b88f950e98154a87980 upstream.
+
+Since commit 1196c2f a domain is only destroyed in the
+notifier path if it is hot-unplugged. This caused a
+domain leakage in iommu_attach_device when a driver was
+unbound from the device and bound to VFIO. In this case the
+device is attached to a new domain and unlinked from the old
+domain. At this point nothing points to the old domain
+anymore and its memory is leaked.
+Fix this by explicitly freeing the old domain in
+iommu_attach_domain.
+
+Fixes: 1196c2f (iommu/vt-d: Fix dmar_domain leak in iommu_attach_device)
+Tested-by: Jerry Hoemann <jerry.hoemann@hp.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4265,6 +4265,10 @@ static int intel_iommu_attach_device(str
+                               domain_remove_one_dev_info(old_domain, dev);
+                       else
+                               domain_remove_dev_info(old_domain);
++
++                      if (!domain_type_is_vm_or_si(old_domain) &&
++                           list_empty(&old_domain->devices))
++                              domain_exit(old_domain);
+               }
+       }
diff --git a/queue-3.18/pstore-ram-allow-optional-mapping-with-pgprot_noncached.patch b/queue-3.18/pstore-ram-allow-optional-mapping-with-pgprot_noncached.patch
new file mode 100644 (file)
index 0000000..62377f7
--- /dev/null
@@ -0,0 +1,233 @@
+From 027bc8b08242c59e19356b4b2c189f2d849ab660 Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Tue, 16 Sep 2014 13:50:01 -0700
+Subject: pstore-ram: Allow optional mapping with pgprot_noncached
+
+From: Tony Lindgren <tony@atomide.com>
+
+commit 027bc8b08242c59e19356b4b2c189f2d849ab660 upstream.
+
+On some ARMs the memory can be mapped pgprot_noncached() and still
+be working for atomic operations. As pointed out by Colin Cross
+<ccross@android.com>, in some cases you do want to use
+pgprot_noncached() if the SoC supports it to see a debug printk
+just before a write hanging the system.
+
+On ARMs, the atomic operations on strongly ordered memory are
+implementation defined. So let's provide an optional kernel parameter
+for configuring pgprot_noncached(), and use pgprot_writecombine() by
+default.
+
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Rob Herring <robherring2@gmail.com>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Anton Vorontsov <anton@enomsg.org>
+Cc: Colin Cross <ccross@android.com>
+Cc: Olof Johansson <olof@lixom.net>
+Cc: Russell King <linux@arm.linux.org.uk>
+Acked-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/ramoops.txt  |   13 +++++++++++--
+ fs/pstore/ram.c            |   13 +++++++++++--
+ fs/pstore/ram_core.c       |   31 ++++++++++++++++++++++---------
+ include/linux/pstore_ram.h |    4 +++-
+ 4 files changed, 47 insertions(+), 14 deletions(-)
+
+--- a/Documentation/ramoops.txt
++++ b/Documentation/ramoops.txt
+@@ -14,11 +14,19 @@ survive after a restart.
+ 1. Ramoops concepts
+-Ramoops uses a predefined memory area to store the dump. The start and size of
+-the memory area are set using two variables:
++Ramoops uses a predefined memory area to store the dump. The start and size
++and type of the memory area are set using three variables:
+   * "mem_address" for the start
+   * "mem_size" for the size. The memory size will be rounded down to a
+   power of two.
++  * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
++
++Typically the default value of mem_type=0 should be used as that sets the pstore
++mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
++pgprot_noncached, which only works on some platforms. This is because pstore
++depends on atomic operations. At least on ARM, pgprot_noncached causes the
++memory to be mapped strongly ordered, and atomic operations on strongly ordered
++memory are implementation defined, and won't work on many ARMs such as omaps.
+ The memory area is divided into "record_size" chunks (also rounded down to
+ power of two) and each oops/panic writes a "record_size" chunk of
+@@ -55,6 +63,7 @@ Setting the ramoops parameters can be do
+ static struct ramoops_platform_data ramoops_data = {
+         .mem_size               = <...>,
+         .mem_address            = <...>,
++        .mem_type               = <...>,
+         .record_size            = <...>,
+         .dump_oops              = <...>,
+         .ecc                    = <...>,
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
+ MODULE_PARM_DESC(mem_size,
+               "size of reserved RAM used to store oops/panic logs");
++static unsigned int mem_type;
++module_param(mem_type, uint, 0600);
++MODULE_PARM_DESC(mem_type,
++              "set to 1 to try to use unbuffered memory (default 0)");
++
+ static int dump_oops = 1;
+ module_param(dump_oops, int, 0600);
+ MODULE_PARM_DESC(dump_oops,
+@@ -79,6 +84,7 @@ struct ramoops_context {
+       struct persistent_ram_zone *fprz;
+       phys_addr_t phys_addr;
+       unsigned long size;
++      unsigned int memtype;
+       size_t record_size;
+       size_t console_size;
+       size_t ftrace_size;
+@@ -358,7 +364,8 @@ static int ramoops_init_przs(struct devi
+               size_t sz = cxt->record_size;
+               cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
+-                                                &cxt->ecc_info);
++                                                &cxt->ecc_info,
++                                                cxt->memtype);
+               if (IS_ERR(cxt->przs[i])) {
+                       err = PTR_ERR(cxt->przs[i]);
+                       dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+@@ -388,7 +395,7 @@ static int ramoops_init_prz(struct devic
+               return -ENOMEM;
+       }
+-      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
++      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+       if (IS_ERR(*prz)) {
+               int err = PTR_ERR(*prz);
+@@ -435,6 +442,7 @@ static int ramoops_probe(struct platform
+       cxt->size = pdata->mem_size;
+       cxt->phys_addr = pdata->mem_address;
++      cxt->memtype = pdata->mem_type;
+       cxt->record_size = pdata->record_size;
+       cxt->console_size = pdata->console_size;
+       cxt->ftrace_size = pdata->ftrace_size;
+@@ -564,6 +572,7 @@ static void ramoops_register_dummy(void)
+       dummy_data->mem_size = mem_size;
+       dummy_data->mem_address = mem_address;
++      dummy_data->mem_type = 0;
+       dummy_data->record_size = record_size;
+       dummy_data->console_size = ramoops_console_size;
+       dummy_data->ftrace_size = ramoops_ftrace_size;
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persisten
+       persistent_ram_update_header_ecc(prz);
+ }
+-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
++static void *persistent_ram_vmap(phys_addr_t start, size_t size,
++              unsigned int memtype)
+ {
+       struct page **pages;
+       phys_addr_t page_start;
+@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_ad
+       page_start = start - offset_in_page(start);
+       page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+-      prot = pgprot_writecombine(PAGE_KERNEL);
++      if (memtype)
++              prot = pgprot_noncached(PAGE_KERNEL);
++      else
++              prot = pgprot_writecombine(PAGE_KERNEL);
+       pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+       if (!pages) {
+@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_ad
+       return vaddr;
+ }
+-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
++static void *persistent_ram_iomap(phys_addr_t start, size_t size,
++              unsigned int memtype)
+ {
++      void *va;
++
+       if (!request_mem_region(start, size, "persistent_ram")) {
+               pr_err("request mem region (0x%llx@0x%llx) failed\n",
+                       (unsigned long long)size, (unsigned long long)start);
+@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_a
+       buffer_start_add = buffer_start_add_locked;
+       buffer_size_add = buffer_size_add_locked;
+-      return ioremap_wc(start, size);
++      if (memtype)
++              va = ioremap(start, size);
++      else
++              va = ioremap_wc(start, size);
++
++      return va;
+ }
+ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+-              struct persistent_ram_zone *prz)
++              struct persistent_ram_zone *prz, int memtype)
+ {
+       prz->paddr = start;
+       prz->size = size;
+       if (pfn_valid(start >> PAGE_SHIFT))
+-              prz->vaddr = persistent_ram_vmap(start, size);
++              prz->vaddr = persistent_ram_vmap(start, size, memtype);
+       else
+-              prz->vaddr = persistent_ram_iomap(start, size);
++              prz->vaddr = persistent_ram_iomap(start, size, memtype);
+       if (!prz->vaddr) {
+               pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
+@@ -500,7 +512,8 @@ void persistent_ram_free(struct persiste
+ }
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-                      u32 sig, struct persistent_ram_ecc_info *ecc_info)
++                      u32 sig, struct persistent_ram_ecc_info *ecc_info,
++                      unsigned int memtype)
+ {
+       struct persistent_ram_zone *prz;
+       int ret = -ENOMEM;
+@@ -511,7 +524,7 @@ struct persistent_ram_zone *persistent_r
+               goto err;
+       }
+-      ret = persistent_ram_buffer_map(start, size, prz);
++      ret = persistent_ram_buffer_map(start, size, prz, memtype);
+       if (ret)
+               goto err;
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -53,7 +53,8 @@ struct persistent_ram_zone {
+ };
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-                      u32 sig, struct persistent_ram_ecc_info *ecc_info);
++                      u32 sig, struct persistent_ram_ecc_info *ecc_info,
++                      unsigned int memtype);
+ void persistent_ram_free(struct persistent_ram_zone *prz);
+ void persistent_ram_zap(struct persistent_ram_zone *prz);
+@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct
+ struct ramoops_platform_data {
+       unsigned long   mem_size;
+       unsigned long   mem_address;
++      unsigned int    mem_type;
+       unsigned long   record_size;
+       unsigned long   console_size;
+       unsigned long   ftrace_size;
diff --git a/queue-3.18/pstore-ram-fix-hangs-by-using-write-combine-mappings.patch b/queue-3.18/pstore-ram-fix-hangs-by-using-write-combine-mappings.patch
new file mode 100644 (file)
index 0000000..34efb14
--- /dev/null
@@ -0,0 +1,68 @@
+From 7ae9cb81933515dc7db1aa3c47ef7653717e3090 Mon Sep 17 00:00:00 2001
+From: Rob Herring <robherring2@gmail.com>
+Date: Fri, 12 Sep 2014 11:32:24 -0700
+Subject: pstore-ram: Fix hangs by using write-combine mappings
+
+From: Rob Herring <robherring2@gmail.com>
+
+commit 7ae9cb81933515dc7db1aa3c47ef7653717e3090 upstream.
+
+Currently trying to use pstore on at least ARMs can hang as we're
+mapping the persistent RAM with pgprot_noncached().
+
+On ARMs, pgprot_noncached() will actually make the memory strongly
+ordered, and as the atomic operations pstore uses are implementation
+defined for strongly ordered memory, they may not work. So basically
+atomic operations have undefined behavior on ARM for device or strongly
+ordered memory types.
+
+Let's fix the issue by using write-combine variants for mappings. This
+corresponds to normal, non-cacheable memory on ARM. For many other
+architectures, this change does not change the mapping type as by
+default we have:
+
+#define pgprot_writecombine pgprot_noncached
+
+The reason why pgprot_noncached() was originally used for pstore
+is because Colin Cross <ccross@android.com> had observed lost
+debug prints right before a device hanging write operation on some
+systems. For the platforms supporting pgprot_noncached(), we can
+add an optional configuration option to support that. But let's
+get pstore working first before adding new features.
+
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Anton Vorontsov <cbouatmailru@gmail.com>
+Cc: Colin Cross <ccross@android.com>
+Cc: Olof Johansson <olof@lixom.net>
+Cc: linux-kernel@vger.kernel.org
+Acked-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Rob Herring <rob.herring@calxeda.com>
+[tony@atomide.com: updated description]
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -392,7 +392,7 @@ static void *persistent_ram_vmap(phys_ad
+       page_start = start - offset_in_page(start);
+       page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+-      prot = pgprot_noncached(PAGE_KERNEL);
++      prot = pgprot_writecombine(PAGE_KERNEL);
+       pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+       if (!pages) {
+@@ -422,7 +422,7 @@ static void *persistent_ram_iomap(phys_a
+       buffer_start_add = buffer_start_add_locked;
+       buffer_size_add = buffer_size_add_locked;
+-      return ioremap(start, size);
++      return ioremap_wc(start, size);
+ }
+ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
index 2d46426f6fa08950d50e7954d6463228a782240d..b9fb9b7058b4d346c8d97579731e76945f682362 100644 (file)
@@ -37,3 +37,10 @@ bluetooth-clear-le-white-list-when-resetting-controller.patch
 bluetooth-fix-controller-configuration-with-hci_quirk_invalid_bdaddr.patch
 bluetooth-fix-accepting-connections-when-not-using-mgmt.patch
 pci-restore-detection-of-read-only-bars.patch
+brcmfmac-fix-ifidx-for-rx-data-by-msgbuf.patch
+pstore-ram-fix-hangs-by-using-write-combine-mappings.patch
+pstore-ram-allow-optional-mapping-with-pgprot_noncached.patch
+ubi-fix-invalid-vfree.patch
+ubi-fix-double-free-after-do_sync_erase.patch
+iommu-vt-d-fix-an-off-by-one-bug-in-__domain_mapping.patch
+iommu-vt-d-fix-dmar_domain-leak-in-iommu_attach_device.patch
diff --git a/queue-3.18/ubi-fix-double-free-after-do_sync_erase.patch b/queue-3.18/ubi-fix-double-free-after-do_sync_erase.patch
new file mode 100644 (file)
index 0000000..72cf63b
--- /dev/null
@@ -0,0 +1,57 @@
+From aa5ad3b6eb8feb2399a5d26c8fb0060561bb9534 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Thu, 6 Nov 2014 16:47:49 +0100
+Subject: UBI: Fix double free after do_sync_erase()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit aa5ad3b6eb8feb2399a5d26c8fb0060561bb9534 upstream.
+
+If the erase worker is unable to erase a PEB it will
+free the ubi_wl_entry itself.
+The failing ubi_wl_entry must not be free()'d again after
+do_sync_erase() returns.
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/wl.c |   10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1212,7 +1212,6 @@ static int wear_leveling_worker(struct u
+       err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+       if (err) {
+-              kmem_cache_free(ubi_wl_entry_slab, e1);
+               if (e2)
+                       kmem_cache_free(ubi_wl_entry_slab, e2);
+               goto out_ro;
+@@ -1226,10 +1225,8 @@ static int wear_leveling_worker(struct u
+               dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+                      e2->pnum, vol_id, lnum);
+               err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+-              if (err) {
+-                      kmem_cache_free(ubi_wl_entry_slab, e2);
++              if (err)
+                       goto out_ro;
+-              }
+       }
+       dbg_wl("done");
+@@ -1265,10 +1262,9 @@ out_not_moved:
+       ubi_free_vid_hdr(ubi, vid_hdr);
+       err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+-      if (err) {
+-              kmem_cache_free(ubi_wl_entry_slab, e2);
++      if (err)
+               goto out_ro;
+-      }
++
+       mutex_unlock(&ubi->move_mutex);
+       return 0;
diff --git a/queue-3.18/ubi-fix-invalid-vfree.patch b/queue-3.18/ubi-fix-invalid-vfree.patch
new file mode 100644 (file)
index 0000000..e169a8f
--- /dev/null
@@ -0,0 +1,72 @@
+From f38aed975c0c3645bbdfc5ebe35726e64caaf588 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 27 Oct 2014 00:46:11 +0100
+Subject: UBI: Fix invalid vfree()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit f38aed975c0c3645bbdfc5ebe35726e64caaf588 upstream.
+
+The logic of vfree()'ing vol->upd_buf is tied to vol->updating.
+In ubi_start_update() vol->updating is set long before vmalloc()'ing
+vol->upd_buf. If we encounter a write failure in ubi_start_update()
+before vmalloc() the UBI device release function will try to vfree()
+vol->upd_buf because vol->updating is set.
+Fix this by allocating vol->upd_buf directly after setting vol->updating.
+
+Fixes:
+[   31.559338] UBI warning: vol_cdev_release: update of volume 2 not finished, volume is damaged
+[   31.559340] ------------[ cut here ]------------
+[   31.559343] WARNING: CPU: 1 PID: 2747 at mm/vmalloc.c:1446 __vunmap+0xe3/0x110()
+[   31.559344] Trying to vfree() nonexistent vm area (ffffc90001f2b000)
+[   31.559345] Modules linked in:
+[   31.565620]  0000000000000bba ffff88002a0cbdb0 ffffffff818f0497 ffff88003b9ba148
+[   31.566347]  ffff88002a0cbde0 ffffffff8156f515 ffff88003b9ba148 0000000000000bba
+[   31.567073]  0000000000000000 0000000000000000 ffff88002a0cbe88 ffffffff8156c10a
+[   31.567793] Call Trace:
+[   31.568034]  [<ffffffff818f0497>] dump_stack+0x4e/0x7a
+[   31.568510]  [<ffffffff8156f515>] ubi_io_write_vid_hdr+0x155/0x160
+[   31.569084]  [<ffffffff8156c10a>] ubi_eba_write_leb+0x23a/0x870
+[   31.569628]  [<ffffffff81569b36>] vol_cdev_write+0x226/0x380
+[   31.570155]  [<ffffffff81179265>] vfs_write+0xb5/0x1f0
+[   31.570627]  [<ffffffff81179f8a>] SyS_pwrite64+0x6a/0xa0
+[   31.571123]  [<ffffffff818fde12>] system_call_fastpath+0x16/0x1b
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/upd.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *
+       ubi_assert(!vol->updating && !vol->changing_leb);
+       vol->updating = 1;
++      vol->upd_buf = vmalloc(ubi->leb_size);
++      if (!vol->upd_buf)
++              return -ENOMEM;
++
+       err = set_update_marker(ubi, vol);
+       if (err)
+               return err;
+@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *
+               err = clear_update_marker(ubi, vol, 0);
+               if (err)
+                       return err;
++
++              vfree(vol->upd_buf);
+               vol->updating = 0;
+               return 0;
+       }
+-      vol->upd_buf = vmalloc(ubi->leb_size);
+-      if (!vol->upd_buf)
+-              return -ENOMEM;
+-
+       vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+                              vol->usable_leb_size);
+       vol->upd_bytes = bytes;