4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 May 2016 18:51:39 +0000 (11:51 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 May 2016 18:51:39 +0000 (11:51 -0700)
added patches:
ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch
x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch

queue-4.4/ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch [new file with mode: 0644]

diff --git a/queue-4.4/ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch b/queue-4.4/ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch
new file mode 100644 (file)
index 0000000..47ffea2
--- /dev/null
@@ -0,0 +1,87 @@
+From 5e1021f2b6dff1a86a468a1424d59faae2bc63c1 Mon Sep 17 00:00:00 2001
+From: Eryu Guan <guaneryu@gmail.com>
+Date: Sat, 12 Mar 2016 21:40:32 -0500
+Subject: ext4: fix NULL pointer dereference in ext4_mark_inode_dirty()
+
+From: Eryu Guan <guaneryu@gmail.com>
+
+commit 5e1021f2b6dff1a86a468a1424d59faae2bc63c1 upstream.
+
+ext4_reserve_inode_write() in ext4_mark_inode_dirty() can fail on
+error (e.g. EIO), in which case iloc.bh is left NULL. But the error is
+ignored by the following "if" condition, and ext4_expand_extra_isize()
+may then be called with a NULL iloc.bh, which triggers a NULL pointer
+dereference.
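+
+In simplified form, the fix checks the error before iloc.bh is ever
+used (a sketch of the intended flow only; the actual change is in the
+diff below):
+
+  err = ext4_reserve_inode_write(handle, inode, &iloc);
+  if (err)
+          return err;   /* iloc.bh may be NULL here, don't touch it */
+  /* only past this point is it safe to pass iloc.bh around, e.g. to
+   * ext4_expand_extra_isize() */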
+
+This was uncovered by commit 8b4953e13f4c ("ext4: reserve code points
+for the project quota feature"), which enlarges the ext4_inode size; it
+can be reproduced by running the following script on a new kernel but
+with an old mke2fs:
+
+  #!/bin/bash
+  mnt=/mnt/ext4
+  devname=ext4-error
+  dev=/dev/mapper/$devname
+  fsimg=/home/fs.img
+
+  trap cleanup 0 1 2 3 9 15
+
+  cleanup()
+  {
+          umount $mnt >/dev/null 2>&1
+          dmsetup remove $devname
+          losetup -d $backend_dev
+          rm -f $fsimg
+          exit 0
+  }
+
+  rm -f $fsimg
+  fallocate -l 1g $fsimg
+  backend_dev=`losetup -f --show $fsimg`
+  devsize=`blockdev --getsz $backend_dev`
+
+  good_tab="0 $devsize linear $backend_dev 0"
+  error_tab="0 $devsize error $backend_dev 0"
+
+  dmsetup create $devname --table "$good_tab"
+
+  mkfs -t ext4 $dev
+  mount -t ext4 -o errors=continue,strictatime $dev $mnt
+
+  dmsetup load $devname --table "$error_tab" && dmsetup resume $devname
+  echo 3 > /proc/sys/vm/drop_caches
+  ls -l $mnt
+  exit 0
+
+[ Patch changed to simplify the function a tiny bit. -- Ted ]
+
+Signed-off-by: Eryu Guan <guaneryu@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5109,6 +5109,8 @@ int ext4_mark_inode_dirty(handle_t *hand
+       might_sleep();
+       trace_ext4_mark_inode_dirty(inode, _RET_IP_);
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
++      if (err)
++              return err;
+       if (ext4_handle_valid(handle) &&
+           EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+           !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+@@ -5139,9 +5141,7 @@ int ext4_mark_inode_dirty(handle_t *hand
+                       }
+               }
+       }
+-      if (!err)
+-              err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+-      return err;
++      return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+ /*
diff --git a/queue-4.4/series b/queue-4.4/series
index 6c2b4bbe80e66fac041288a343a3f9fd2631fa61..f3e62555ef52c33e83973c86c4daa58ed0bc9640 100644 (file)
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -134,3 +134,5 @@ rtc-max77686-properly-handle-regmap_irq_get_virq-error-code.patch
 drivers-misc-ad525x_dpot-ad5274-fix-rdac-read-back-errors.patch
 perf-evlist-reference-count-the-cpu-and-thread-maps-at-set_maps.patch
 spi-rockchip-modify-dma-max-burst-to-1.patch
+x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch
+ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch
diff --git a/queue-4.4/x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch b/queue-4.4/x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch
new file mode 100644 (file)
index 0000000..31cc142
--- /dev/null
@@ -0,0 +1,281 @@
+From cfa52c0cfa4d727aa3e457bf29aeff296c528a08 Mon Sep 17 00:00:00 2001
+From: Karol Herbst <nouveau@karolherbst.de>
+Date: Thu, 3 Mar 2016 02:03:11 +0100
+Subject: x86/mm/kmmio: Fix mmiotrace for hugepages
+
+From: Karol Herbst <nouveau@karolherbst.de>
+
+commit cfa52c0cfa4d727aa3e457bf29aeff296c528a08 upstream.
+
+Because Linux might use pages bigger than 4K to map those MMIO ioremaps,
+the kmmio code shouldn't rely on the page id as it currently does.
+
+Using the memory address instead of the page id lets us look up how big the
+page is and what its base address is, so that we won't get a page fault
+within the same page twice anymore.
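+
+In other words, instead of hashing on "addr & PAGE_MASK", the lookup
+helpers first ask the page tables how large the mapping is (a sketch of
+the pattern used throughout the diff below):
+
+  unsigned int l;
+  pte_t *pte = lookup_address(addr, &l);  /* also reports the page level */
+
+  if (!pte)
+          return NULL;
+  addr &= page_level_mask(l);  /* base of the 4K/2M/1G page, not just 4K */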
+
+Tested-by: Pierre Moreau <pierre.morrow@free.fr>
+Signed-off-by: Karol Herbst <nouveau@karolherbst.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-mm@kvack.org
+Cc: linux-x86_64@vger.kernel.org
+Cc: nouveau@lists.freedesktop.org
+Cc: pq@iki.fi
+Cc: rostedt@goodmis.org
+Link: http://lkml.kernel.org/r/1456966991-6861-1-git-send-email-nouveau@karolherbst.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/kmmio.c |   88 ++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 59 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+       struct list_head list;
+       struct kmmio_fault_page *release_next;
+-      unsigned long page; /* location of the fault page */
++      unsigned long addr; /* the requested address */
+       pteval_t old_presence; /* page presence prior to arming */
+       bool armed;
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+-      return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++      unsigned int l;
++      pte_t *pte = lookup_address(addr, &l);
++
++      if (!pte)
++              return NULL;
++      addr &= page_level_mask(l);
++
++      return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_pro
+ }
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+       struct list_head *head;
+       struct kmmio_fault_page *f;
++      unsigned int l;
++      pte_t *pte = lookup_address(addr, &l);
+-      page &= PAGE_MASK;
+-      head = kmmio_page_list(page);
++      if (!pte)
++              return NULL;
++      addr &= page_level_mask(l);
++      head = kmmio_page_list(addr);
+       list_for_each_entry_rcu(f, head, list) {
+-              if (f->page == page)
++              if (f->addr == addr)
+                       return f;
+       }
+       return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pt
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+       unsigned int level;
+-      pte_t *pte = lookup_address(f->page, &level);
++      pte_t *pte = lookup_address(f->addr, &level);
+       if (!pte) {
+-              pr_err("no pte for page 0x%08lx\n", f->page);
++              pr_err("no pte for addr 0x%08lx\n", f->addr);
+               return -1;
+       }
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct km
+               return -1;
+       }
+-      __flush_tlb_one(f->page);
++      __flush_tlb_one(f->addr);
+       return 0;
+ }
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct k
+       int ret;
+       WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+       if (f->armed) {
+-              pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+-                         f->page, f->count, !!f->old_presence);
++              pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++                         f->addr, f->count, !!f->old_presence);
+       }
+       ret = clear_page_presence(f, true);
+-      WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+-                f->page);
++      WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++                f->addr);
+       f->armed = true;
+       return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(stru
+ {
+       int ret = clear_page_presence(f, false);
+       WARN_ONCE(ret < 0,
+-                      KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++                      KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+       f->armed = false;
+ }
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs,
+       struct kmmio_context *ctx;
+       struct kmmio_fault_page *faultpage;
+       int ret = 0; /* default to fault not handled */
++      unsigned long page_base = addr;
++      unsigned int l;
++      pte_t *pte = lookup_address(addr, &l);
++      if (!pte)
++              return -EINVAL;
++      page_base &= page_level_mask(l);
+       /*
+        * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs,
+       preempt_disable();
+       rcu_read_lock();
+-      faultpage = get_kmmio_fault_page(addr);
++      faultpage = get_kmmio_fault_page(page_base);
+       if (!faultpage) {
+               /*
+                * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs,
+       ctx = &get_cpu_var(kmmio_ctx);
+       if (ctx->active) {
+-              if (addr == ctx->addr) {
++              if (page_base == ctx->addr) {
+                       /*
+                        * A second fault on the same page means some other
+                        * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs,
+       ctx->active++;
+       ctx->fpage = faultpage;
+-      ctx->probe = get_kmmio_probe(addr);
++      ctx->probe = get_kmmio_probe(page_base);
+       ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+-      ctx->addr = addr;
++      ctx->addr = page_base;
+       if (ctx->probe && ctx->probe->pre_handler)
+               ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+       struct kmmio_fault_page *f;
+-      page &= PAGE_MASK;
+-      f = get_kmmio_fault_page(page);
++      f = get_kmmio_fault_page(addr);
+       if (f) {
+               if (!f->count)
+                       arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned
+               return -1;
+       f->count = 1;
+-      f->page = page;
++      f->addr = addr;
+       if (arm_kmmio_fault_page(f)) {
+               kfree(f);
+               return -1;
+       }
+-      list_add_rcu(&f->list, kmmio_page_list(f->page));
++      list_add_rcu(&f->list, kmmio_page_list(f->addr));
+       return 0;
+ }
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+                               struct kmmio_fault_page **release_list)
+ {
+       struct kmmio_fault_page *f;
+-      page &= PAGE_MASK;
+-      f = get_kmmio_fault_page(page);
++      f = get_kmmio_fault_page(addr);
+       if (!f)
+               return;
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_pr
+       int ret = 0;
+       unsigned long size = 0;
+       const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++      unsigned int l;
++      pte_t *pte;
+       spin_lock_irqsave(&kmmio_lock, flags);
+       if (get_kmmio_probe(p->addr)) {
+               ret = -EEXIST;
+               goto out;
+       }
++
++      pte = lookup_address(p->addr, &l);
++      if (!pte) {
++              ret = -EINVAL;
++              goto out;
++      }
++
+       kmmio_count++;
+       list_add_rcu(&p->list, &kmmio_probes);
+       while (size < size_lim) {
+               if (add_kmmio_fault_page(p->addr + size))
+                       pr_err("Unable to set page fault.\n");
+-              size += PAGE_SIZE;
++              size += page_level_size(l);
+       }
+ out:
+       spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio
+       const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+       struct kmmio_fault_page *release_list = NULL;
+       struct kmmio_delayed_release *drelease;
++      unsigned int l;
++      pte_t *pte;
++
++      pte = lookup_address(p->addr, &l);
++      if (!pte)
++              return;
+       spin_lock_irqsave(&kmmio_lock, flags);
+       while (size < size_lim) {
+               release_kmmio_fault_page(p->addr + size, &release_list);
+-              size += PAGE_SIZE;
++              size += page_level_size(l);
+       }
+       list_del_rcu(&p->list);
+       kmmio_count--;