git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 8 Mar 2019 11:17:36 +0000 (12:17 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 8 Mar 2019 11:17:36 +0000 (12:17 +0100)
added patches:
applicom-fix-potential-spectre-v1-vulnerabilities.patch
hugetlbfs-fix-races-and-page-leaks-during-migration.patch
mips-irq-allocate-accurate-order-pages-for-irq-stack.patch
xtensa-fix-get_wchan.patch

queue-4.9/applicom-fix-potential-spectre-v1-vulnerabilities.patch [new file with mode: 0644]
queue-4.9/hugetlbfs-fix-races-and-page-leaks-during-migration.patch [new file with mode: 0644]
queue-4.9/mips-irq-allocate-accurate-order-pages-for-irq-stack.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/xtensa-fix-get_wchan.patch [new file with mode: 0644]

diff --git a/queue-4.9/applicom-fix-potential-spectre-v1-vulnerabilities.patch b/queue-4.9/applicom-fix-potential-spectre-v1-vulnerabilities.patch
new file mode 100644 (file)
index 0000000..9bac280
--- /dev/null
@@ -0,0 +1,104 @@
+From d7ac3c6ef5d8ce14b6381d52eb7adafdd6c8bb3c Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 9 Jan 2019 16:05:10 -0600
+Subject: applicom: Fix potential Spectre v1 vulnerabilities
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit d7ac3c6ef5d8ce14b6381d52eb7adafdd6c8bb3c upstream.
+
+IndexCard is indirectly controlled by user-space, which can lead to
+exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/char/applicom.c:418 ac_write() warn: potential spectre issue 'apbs' [r]
+drivers/char/applicom.c:728 ac_ioctl() warn: potential spectre issue 'apbs' [r] (local cap)
+
+Fix this by sanitizing IndexCard before using it to index apbs.
+
+Notice that, because speculation windows are large, the policy is to
+kill the speculation on the first load and not worry about whether it
+can be completed with a dependent load/store [1].
+
+[1] https://lore.kernel.org/lkml/20180423164740.GY17484@dhcp22.suse.cz/
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/applicom.c |   35 ++++++++++++++++++++++++-----------
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/nospec.h>
+ #include <asm/io.h>
+ #include <asm/uaccess.h>
+@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *fil
+       TicCard = st_loc.tic_des_from_pc;       /* tic number to send            */
+       IndexCard = NumCard - 1;
+-      if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
++      if (IndexCard >= MAX_BOARD)
++              return -EINVAL;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (!apbs[IndexCard].RamIO)
+               return -EINVAL;
+ #ifdef DEBUG
+@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file,
+       unsigned char IndexCard;
+       void __iomem *pmem;
+       int ret = 0;
++      static int warncount = 10;
+       volatile unsigned char byte_reset_it;
+       struct st_ram_io *adgl;
+       void __user *argp = (void __user *)arg;
+@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file,
+       mutex_lock(&ac_mutex);  
+       IndexCard = adgl->num_card-1;
+        
+-      if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+-              static int warncount = 10;
+-              if (warncount) {
+-                      printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+-                      warncount--;
+-              }
+-              kfree(adgl);
+-              mutex_unlock(&ac_mutex);
+-              return -EINVAL;
+-      }
++      if (cmd != 6 && IndexCard >= MAX_BOARD)
++              goto err;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (cmd != 6 && !apbs[IndexCard].RamIO)
++              goto err;
+       switch (cmd) {
+               
+@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file,
+       kfree(adgl);
+       mutex_unlock(&ac_mutex);
+       return 0;
++
++err:
++      if (warncount) {
++              pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
++                      (int)IndexCard + 1);
++              warncount--;
++      }
++      kfree(adgl);
++      mutex_unlock(&ac_mutex);
++      return -EINVAL;
++
+ }
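
A minimal user-space sketch of the sanitization pattern the patch above
applies: bounds-check the index first, then clamp it so the CPU cannot use
an out-of-bounds value under speculation.  index_mask_nospec() here only
imitates the kernel's generic array_index_mask_nospec() fallback, relies on
arithmetic right shift of a negative value, and is for illustration only,
not a guaranteed speculation barrier on every compiler/CPU:

  #include <stdio.h>

  #define MAX_BOARD 8

  static int board_data[MAX_BOARD] = { 1, 2, 3, 4, 5, 6, 7, 8 };

  /* All-ones mask when index < size, zero otherwise; mirrors the generic
   * kernel fallback (assumes arithmetic right shift of signed values). */
  static unsigned long index_mask_nospec(unsigned long index,
                                         unsigned long size)
  {
          return ~(long)(index | (size - 1UL - index))
                  >> (sizeof(long) * 8 - 1);
  }

  static int read_board(unsigned long user_index)
  {
          if (user_index >= MAX_BOARD)    /* 1: architectural bounds check */
                  return -1;
          /* 2: mask the index so even a mispredicted branch cannot index
           *    beyond the array under speculation */
          user_index &= index_mask_nospec(user_index, MAX_BOARD);
          return board_data[user_index];
  }

  int main(void)
  {
          printf("%d\n", read_board(3));  /* prints 4 */
          return 0;
  }
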
diff --git a/queue-4.9/hugetlbfs-fix-races-and-page-leaks-during-migration.patch b/queue-4.9/hugetlbfs-fix-races-and-page-leaks-during-migration.patch
new file mode 100644 (file)
index 0000000..e325776
--- /dev/null
@@ -0,0 +1,200 @@
+From cb6acd01e2e43fd8bad11155752b7699c3d0fb76 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 28 Feb 2019 16:22:02 -0800
+Subject: hugetlbfs: fix races and page leaks during migration
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit cb6acd01e2e43fd8bad11155752b7699c3d0fb76 upstream.
+
+hugetlb pages should only be migrated if they are 'active'.  The
+routines set/clear_page_huge_active() modify the active state of hugetlb
+pages.
+
+When a new hugetlb page is allocated at fault time, set_page_huge_active
+is called before the page is locked.  Therefore, another thread could
+race and migrate the page while it is being added to the page table by
+the fault code.  This race is somewhat hard to trigger, but can be seen
+by strategically adding udelay() calls to simulate worst-case scheduling
+behavior.  Depending on 'how' the code races, various BUG()s could be
+triggered.
+
+To address this issue, simply delay the set_page_huge_active call until
+after the page is successfully added to the page table.
+
+Hugetlb pages can also be leaked at migration time if the pages are
+associated with a file in an explicitly mounted hugetlbfs filesystem.
+For example, consider a two node system with 4GB worth of huge pages
+available.  A program mmaps a 2G file in a hugetlbfs filesystem.  It
+then migrates the pages associated with the file from one node to
+another.  When the program exits, huge page counts are as follows:
+
+  node0
+  1024    free_hugepages
+  1024    nr_hugepages
+
+  node1
+  0       free_hugepages
+  1024    nr_hugepages
+
+  Filesystem                         Size  Used Avail Use% Mounted on
+  nodev                              4.0G  2.0G  2.0G  50% /var/opt/hugepool
+
+That is as expected.  2G of huge pages are taken from the free_hugepages
+counts, and 2G is the size of the file in the explicitly mounted
+filesystem.  If the file is then removed, the counts become:
+
+  node0
+  1024    free_hugepages
+  1024    nr_hugepages
+
+  node1
+  1024    free_hugepages
+  1024    nr_hugepages
+
+  Filesystem                         Size  Used Avail Use% Mounted on
+  nodev                              4.0G  2.0G  2.0G  50% /var/opt/hugepool
+
+Note that the filesystem still shows 2G of pages used, while there
+actually are no huge pages in use.  The only way to 'fix' the filesystem
+accounting is to unmount the filesystem.
+
+If a hugetlb page is associated with an explicitly mounted filesystem,
+this information is contained in the page_private field.  At migration
+time, this information is not preserved.  To fix this, simply transfer
+page_private from the old to the new page at migration time if necessary.
+
+There is a related race with removing a huge page from a file and
+migration.  When a huge page is removed from the pagecache, the
+page_mapping() field is cleared, yet page_private remains set until the
+page is actually freed by free_huge_page().  A page could be migrated
+while in this state.  However, since page_mapping() is not set, the
+hugetlbfs-specific routine to transfer page_private is not called, and
+we leak the page count in the filesystem.
+
+To fix that, check for this condition before migrating a huge page.  If
+the condition is detected, return EBUSY for the page.
+
+Link: http://lkml.kernel.org/r/74510272-7319-7372-9ea6-ec914734c179@oracle.com
+Link: http://lkml.kernel.org/r/20190212221400.3512-1-mike.kravetz@oracle.com
+Fixes: bcc54222309c ("mm: hugetlb: introduce page_huge_active")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: <stable@vger.kernel.org>
+[mike.kravetz@oracle.com: v2]
+  Link: http://lkml.kernel.org/r/7534d322-d782-8ac6-1c8d-a8dc380eb3ab@oracle.com
+[mike.kravetz@oracle.com: update comment and changelog]
+  Link: http://lkml.kernel.org/r/420bcfd6-158b-38e4-98da-26d0cd85bd01@oracle.com
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c |   12 ++++++++++++
+ mm/hugetlb.c         |   14 ++++++++++++--
+ mm/migrate.c         |   11 +++++++++++
+ 3 files changed, 35 insertions(+), 2 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -861,6 +861,18 @@ static int hugetlbfs_migrate_page(struct
+       rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+       if (rc != MIGRATEPAGE_SUCCESS)
+               return rc;
++
++      /*
++       * page_private is subpool pointer in hugetlb pages.  Transfer to
++       * new page.  PagePrivate is not associated with page_private for
++       * hugetlb pages and can not be set here as only page_huge_active
++       * pages can be migrated.
++       */
++      if (page_private(page)) {
++              set_page_private(newpage, page_private(page));
++              set_page_private(page, 0);
++      }
++
+       migrate_page_copy(newpage, page);
+       return MIGRATEPAGE_SUCCESS;
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3579,7 +3579,6 @@ retry_avoidcopy:
+       copy_user_huge_page(new_page, old_page, address, vma,
+                           pages_per_huge_page(h));
+       __SetPageUptodate(new_page);
+-      set_page_huge_active(new_page);
+       mmun_start = address & huge_page_mask(h);
+       mmun_end = mmun_start + huge_page_size(h);
+@@ -3601,6 +3600,7 @@ retry_avoidcopy:
+                               make_huge_pte(vma, new_page, 1));
+               page_remove_rmap(old_page, true);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
++              set_page_huge_active(new_page);
+               /* Make the old page be freed below */
+               new_page = old_page;
+       }
+@@ -3683,6 +3683,7 @@ static int hugetlb_no_page(struct mm_str
+       struct page *page;
+       pte_t new_pte;
+       spinlock_t *ptl;
++      bool new_page = false;
+       /*
+        * Currently, we are forced to kill the process in the event the
+@@ -3716,7 +3717,7 @@ retry:
+               }
+               clear_huge_page(page, address, pages_per_huge_page(h));
+               __SetPageUptodate(page);
+-              set_page_huge_active(page);
++              new_page = true;
+               if (vma->vm_flags & VM_MAYSHARE) {
+                       int err = huge_add_to_page_cache(page, mapping, idx);
+@@ -3788,6 +3789,15 @@ retry:
+       }
+       spin_unlock(ptl);
++
++      /*
++       * Only make newly allocated pages active.  Existing pages found
++       * in the pagecache could be !page_huge_active() if they have been
++       * isolated for migration.
++       */
++      if (new_page)
++              set_page_huge_active(page);
++
+       unlock_page(page);
+ out:
+       return ret;
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1234,6 +1234,16 @@ static int unmap_and_move_huge_page(new_
+               lock_page(hpage);
+       }
++      /*
++       * Check for pages which are in the process of being freed.  Without
++       * page_mapping() set, hugetlbfs specific move page routine will not
++       * be called and we could leak usage counts for subpools.
++       */
++      if (page_private(hpage) && !page_mapping(hpage)) {
++              rc = -EBUSY;
++              goto out_unlock;
++      }
++
+       if (PageAnon(hpage))
+               anon_vma = page_get_anon_vma(hpage);
+@@ -1265,6 +1275,7 @@ put_anon:
+               set_page_owner_migrate_reason(new_hpage, reason);
+       }
++out_unlock:
+       unlock_page(hpage);
+ out:
+       if (rc != -EAGAIN)
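
The key invariant in the hugetlbfs_migrate_page() hunk above is that the
subpool reference stored in page_private must move to the new page exactly
once, with the old page dropping it, so the reference is not released a
second time when the old page is freed.  A minimal user-space sketch of
that ownership transfer, with hypothetical stand-in types:

  #include <assert.h>
  #include <stddef.h>

  struct page { void *private; };         /* stand-in for struct page */

  /* Move the private payload from oldpage to newpage and leave oldpage
   * with no reference, so freeing oldpage cannot release it again. */
  static void transfer_private(struct page *newpage, struct page *oldpage)
  {
          if (oldpage->private) {
                  newpage->private = oldpage->private;
                  oldpage->private = NULL;
          }
  }

  int main(void)
  {
          int subpool = 42;               /* stands in for the subpool */
          struct page oldp = { &subpool }, newp = { NULL };

          transfer_private(&newp, &oldp);
          assert(newp.private == &subpool && oldp.private == NULL);
          return 0;
  }
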
diff --git a/queue-4.9/mips-irq-allocate-accurate-order-pages-for-irq-stack.patch b/queue-4.9/mips-irq-allocate-accurate-order-pages-for-irq-stack.patch
new file mode 100644 (file)
index 0000000..5a9e90a
--- /dev/null
@@ -0,0 +1,44 @@
+From 72faa7a773ca59336f3c889e878de81445c5a85c Mon Sep 17 00:00:00 2001
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+Date: Sat, 16 Feb 2019 17:12:24 +0800
+Subject: MIPS: irq: Allocate accurate order pages for irq stack
+
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+
+commit 72faa7a773ca59336f3c889e878de81445c5a85c upstream.
+
+irq_pages is the number of pages for the irq stack, not the allocation
+order needed by __get_free_pages().  Use get_order() to calculate the
+accurate order.
+
+Signed-off-by: Liu Xiang <liu.xiang6@zte.com.cn>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Fixes: fe8bd18ffea5 ("MIPS: Introduce irq_stack")
+Cc: linux-mips@vger.kernel.org
+Cc: stable@vger.kernel.org # v4.11+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/irq.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
+ void __init init_IRQ(void)
+ {
+       int i;
++      unsigned int order = get_order(IRQ_STACK_SIZE);
+       for (i = 0; i < NR_IRQS; i++)
+               irq_set_noprobe(i);
+@@ -62,8 +63,7 @@ void __init init_IRQ(void)
+       arch_init_irq();
+       for_each_possible_cpu(i) {
+-              int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+-              void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++              void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+               irq_stack[i] = s;
+               pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
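
The distinction this patch fixes: __get_free_pages() takes an order and
allocates 2^order contiguous pages, while irq_pages was a page count.  A
user-space re-implementation of get_order() for illustration, assuming
4 KiB pages (the real helper is the kernel's generic get_order()):

  #include <stdio.h>

  #define PAGE_SHIFT 12                   /* assumes 4 KiB pages */
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)

  /* Smallest order such that 2^order pages cover 'size' bytes. */
  static unsigned int get_order(unsigned long size)
  {
          unsigned int order = 0;

          size = (size - 1) >> PAGE_SHIFT;
          while (size) {
                  order++;
                  size >>= 1;
          }
          return order;
  }

  int main(void)
  {
          unsigned long stack_size = 16384; /* hypothetical IRQ_STACK_SIZE */
          unsigned long pages = stack_size / PAGE_SIZE;

          /* The old code passed the page count (4) as the order, which
           * allocates 2^4 = 16 pages instead of the 2^2 = 4 needed. */
          printf("pages=%lu correct order=%u\n", pages, get_order(stack_size));
          return 0;
  }
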
diff --git a/queue-4.9/series b/queue-4.9/series
index cb2b1df52276f250f53c20a7b0fed4ad1ea1b780..88c6296e66a288e7141732926307dbc7ef9ce136 100644 (file)
@@ -26,3 +26,7 @@ tun-fix-blocking-read.patch
 tun-remove-unnecessary-memory-barrier.patch
 net-phy-micrel-ksz8061-link-failure-after-cable-connect.patch
 x86-cpu-amd-set-the-cpb-bit-unconditionally-on-f17h.patch
+applicom-fix-potential-spectre-v1-vulnerabilities.patch
+mips-irq-allocate-accurate-order-pages-for-irq-stack.patch
+hugetlbfs-fix-races-and-page-leaks-during-migration.patch
+xtensa-fix-get_wchan.patch
diff --git a/queue-4.9/xtensa-fix-get_wchan.patch b/queue-4.9/xtensa-fix-get_wchan.patch
new file mode 100644 (file)
index 0000000..9c88358
--- /dev/null
@@ -0,0 +1,35 @@
+From d90b88fd3653f1fb66ecc6571b860d5a5749fa56 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 2 Jan 2019 01:08:32 -0800
+Subject: xtensa: fix get_wchan
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit d90b88fd3653f1fb66ecc6571b860d5a5749fa56 upstream.
+
+Stack unwinding is implemented incorrectly in xtensa get_wchan: instead
+of extracting the a0 and a1 registers from the spill location under the
+stack pointer, it extracts the word pointed to by the stack pointer and
+subtracts 4 or 3 from it.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/process.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -311,8 +311,8 @@ unsigned long get_wchan(struct task_stru
+               /* Stack layout: sp-4: ra, sp-3: sp' */
+-              pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
+-              sp = *(unsigned long *)sp - 3;
++              pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++              sp = SPILL_SLOT(sp, 1);
+       } while (count++ < 16);
+       return 0;
+ }
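
The fix replaces value arithmetic with address arithmetic: the return
address and the caller's stack pointer live in the spill slots just below
sp, 4 and 3 words down.  A user-space sketch, where SPILL_SLOT() is
assumed to match the layout of the kernel macro, for illustration only:

  #include <stdio.h>

  /* Word n of the spill area below sp: slot 0 is at sp - 4 words (ra),
   * slot 1 at sp - 3 words (the caller's sp). */
  #define SPILL_SLOT(sp, n) (((unsigned long *)(sp))[(n) - 4])

  int main(void)
  {
          /* Fake frame: &frame[4] plays the role of the stack pointer. */
          unsigned long frame[5] = { 0x111, 0x222, 0, 0, 0x999 };
          unsigned long sp = (unsigned long)&frame[4];

          unsigned long ra      = SPILL_SLOT(sp, 0); /* frame[0] = 0x111 */
          unsigned long next_sp = SPILL_SLOT(sp, 1); /* frame[1] = 0x222 */

          /* The old, buggy code did value arithmetic on *sp instead: */
          unsigned long bad_ra  = *(unsigned long *)sp - 4;  /* 0x995 */

          printf("ra=%#lx next_sp=%#lx bad_ra=%#lx\n", ra, next_sp, bad_ra);
          return 0;
  }
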