--- /dev/null
+From d7ac3c6ef5d8ce14b6381d52eb7adafdd6c8bb3c Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 9 Jan 2019 16:05:10 -0600
+Subject: applicom: Fix potential Spectre v1 vulnerabilities
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit d7ac3c6ef5d8ce14b6381d52eb7adafdd6c8bb3c upstream.
+
+IndexCard is indirectly controlled by user-space, making it a
+potential vector for Spectre variant 1 exploitation.
+
+This issue was detected with the help of Smatch:
+
+drivers/char/applicom.c:418 ac_write() warn: potential spectre issue 'apbs' [r]
+drivers/char/applicom.c:728 ac_ioctl() warn: potential spectre issue 'apbs' [r] (local cap)
+
+Fix this by sanitizing IndexCard before using it to index apbs.
+
+Note that, since speculation windows are large, the policy is to kill
+the speculation on the first load and not worry whether it can be
+completed with a dependent load/store [1].
+
+[1] https://lore.kernel.org/lkml/20180423164740.GY17484@dhcp22.suse.cz/
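+
+As a minimal sketch of the idiom (a hypothetical lookup() helper, not
+the driver code itself): the bounds check is performed first, and
+array_index_nospec() then forces the index to 0 whenever the CPU
+misspeculates past that check, so the dependent load can never read
+out of bounds:
+
+  #include <linux/nospec.h>
+
+  /* Hypothetical helper illustrating the two-step pattern. */
+  static int lookup(unsigned int idx)
+  {
+          if (idx >= MAX_BOARD)   /* architectural bounds check */
+                  return -EINVAL;
+          /* Clamp idx so speculation cannot index past MAX_BOARD. */
+          idx = array_index_nospec(idx, MAX_BOARD);
+          return apbs[idx].RamIO ? 0 : -EINVAL;
+  }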
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/applicom.c | 35 ++++++++++++++++++++++++-----------
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/nospec.h>
+
+ #include <asm/io.h>
+ #include <linux/uaccess.h>
+@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *fil
+ TicCard = st_loc.tic_des_from_pc; /* tic number to send */
+ IndexCard = NumCard - 1;
+
+- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
++ if (IndexCard >= MAX_BOARD)
++ return -EINVAL;
++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++ if (!apbs[IndexCard].RamIO)
+ return -EINVAL;
+
+ #ifdef DEBUG
+@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file,
+ unsigned char IndexCard;
+ void __iomem *pmem;
+ int ret = 0;
++ static int warncount = 10;
+ volatile unsigned char byte_reset_it;
+ struct st_ram_io *adgl;
+ void __user *argp = (void __user *)arg;
+@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file,
+ mutex_lock(&ac_mutex);
+ IndexCard = adgl->num_card-1;
+
+- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+- static int warncount = 10;
+- if (warncount) {
+- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+- warncount--;
+- }
+- kfree(adgl);
+- mutex_unlock(&ac_mutex);
+- return -EINVAL;
+- }
++ if (cmd != 6 && IndexCard >= MAX_BOARD)
++ goto err;
++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++ if (cmd != 6 && !apbs[IndexCard].RamIO)
++ goto err;
+
+ switch (cmd) {
+
+@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file,
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return 0;
++
++err:
++ if (warncount) {
++ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
++ (int)IndexCard + 1);
++ warncount--;
++ }
++ kfree(adgl);
++ mutex_unlock(&ac_mutex);
++ return -EINVAL;
++
+ }
+
--- /dev/null
+From c4f5627f7eeecde1bb6b646d8c0907b96dc2b2a6 Mon Sep 17 00:00:00 2001
+From: Matthias Kaehlcke <mka@chromium.org>
+Date: Wed, 2 Jan 2019 16:11:20 -0800
+Subject: Bluetooth: Fix locking in bt_accept_enqueue() for BH context
+
+From: Matthias Kaehlcke <mka@chromium.org>
+
+commit c4f5627f7eeecde1bb6b646d8c0907b96dc2b2a6 upstream.
+
+With commit e16337622016 ("Bluetooth: Handle bt_accept_enqueue() socket
+atomically") lock_sock[_nested]() is used to acquire the socket lock
+before manipulating the socket. lock_sock[_nested]() may block, which
+is problematic since bt_accept_enqueue() can be called in bottom half
+context (e.g. from rfcomm_connect_ind()):
+
+[<ffffff80080d81ec>] __might_sleep+0x4c/0x80
+[<ffffff800876c7b0>] lock_sock_nested+0x24/0x58
+[<ffffff8000d7c27c>] bt_accept_enqueue+0x48/0xd4 [bluetooth]
+[<ffffff8000e67d8c>] rfcomm_connect_ind+0x190/0x218 [rfcomm]
+
+Add a parameter to bt_accept_enqueue() to indicate whether the
+function is called from BH context, and acquire the socket lock
+with bh_lock_sock_nested() if that's the case.
+
+Also adapt all callers of bt_accept_enqueue() to pass the new
+parameter:
+
+- l2cap_sock_new_connection_cb()
+ - uses lock_sock() to lock the parent socket => process context
+
+- rfcomm_connect_ind()
+ - acquires the parent socket lock with bh_lock_sock() => BH
+ context
+
+- __sco_chan_add()
+ - called from sco_chan_add(), which is called from sco_connect().
+ parent is NULL, hence bt_accept_enqueue() isn't called in this
+ code path and we can ignore it
+ - also called from sco_conn_ready(). uses bh_lock_sock() to acquire
+ the parent lock => BH context
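+
+Condensed from the call sites changed below, the resulting calling
+convention looks like this (sketch only):
+
+  /* Process context: the full socket lock may sleep. */
+  lock_sock(parent);
+  bt_accept_enqueue(parent, sk, false);
+  release_sock(parent);
+
+  /* BH context: sleeping is not allowed, only the spinlock is safe. */
+  bh_lock_sock(parent);
+  bt_accept_enqueue(parent, sk, true);
+  bh_unlock_sock(parent);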
+
+Fixes: e16337622016 ("Bluetooth: Handle bt_accept_enqueue() socket atomically")
+Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/bluetooth/bluetooth.h | 2 +-
+ net/bluetooth/af_bluetooth.c | 16 +++++++++++++---
+ net/bluetooth/l2cap_sock.c | 2 +-
+ net/bluetooth/rfcomm/sock.c | 2 +-
+ net/bluetooth/sco.c | 2 +-
+ 5 files changed, 17 insertions(+), 7 deletions(-)
+
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -273,7 +273,7 @@ int bt_sock_ioctl(struct socket *sock,
+ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+ int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
+ void bt_accept_unlink(struct sock *sk);
+ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -154,15 +154,25 @@ void bt_sock_unlink(struct bt_sock_list
+ }
+ EXPORT_SYMBOL(bt_sock_unlink);
+
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
+ {
+ BT_DBG("parent %p, sk %p", parent, sk);
+
+ sock_hold(sk);
+- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
++ if (bh)
++ bh_lock_sock_nested(sk);
++ else
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
+ list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
+ bt_sk(sk)->parent = parent;
+- release_sock(sk);
++
++ if (bh)
++ bh_unlock_sock(sk);
++ else
++ release_sock(sk);
++
+ parent->sk_ack_backlog++;
+ }
+ EXPORT_SYMBOL(bt_accept_enqueue);
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1253,7 +1253,7 @@ static struct l2cap_chan *l2cap_sock_new
+
+ l2cap_sock_init(sk, parent);
+
+- bt_accept_enqueue(parent, sk);
++ bt_accept_enqueue(parent, sk, false);
+
+ release_sock(parent);
+
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_ses
+ rfcomm_pi(sk)->channel = channel;
+
+ sk->sk_state = BT_CONFIG;
+- bt_accept_enqueue(parent, sk);
++ bt_accept_enqueue(parent, sk, true);
+
+ /* Accept connection and return socket DLC */
+ *d = rfcomm_pi(sk)->dlc;
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_co
+ conn->sk = sk;
+
+ if (parent)
+- bt_accept_enqueue(parent, sk);
++ bt_accept_enqueue(parent, sk, true);
+ }
+
+ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
--- /dev/null
+From cb6acd01e2e43fd8bad11155752b7699c3d0fb76 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 28 Feb 2019 16:22:02 -0800
+Subject: hugetlbfs: fix races and page leaks during migration
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit cb6acd01e2e43fd8bad11155752b7699c3d0fb76 upstream.
+
+hugetlb pages should only be migrated if they are 'active'. The
+routines set/clear_page_huge_active() modify the active state of hugetlb
+pages.
+
+When a new hugetlb page is allocated at fault time, set_page_huge_active
+is called before the page is locked. Therefore, another thread could
+race and migrate the page while it is being added to the page table by
+the fault code. This race is somewhat hard to trigger, but can be seen
+by strategically adding udelay to simulate worst-case scheduling behavior.
+Depending on 'how' the code races, various BUG()s could be triggered.
+
+To address this issue, simply delay the set_page_huge_active call until
+after the page is successfully added to the page table.
+
+Hugetlb pages can also be leaked at migration time if the pages are
+associated with a file in an explicitly mounted hugetlbfs filesystem.
+For example, consider a two node system with 4GB worth of huge pages
+available. A program mmaps a 2G file in a hugetlbfs filesystem. It
+then migrates the pages associated with the file from one node to
+another. When the program exits, huge page counts are as follows:
+
+ node0
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ node1
+ 0 free_hugepages
+ 1024 nr_hugepages
+
+ Filesystem Size Used Avail Use% Mounted on
+ nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool
+
+That is as expected. 2G of huge pages are taken from the free_hugepages
+counts, and 2G is the size of the file in the explicitly mounted
+filesystem. If the file is then removed, the counts become:
+
+ node0
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ node1
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ Filesystem Size Used Avail Use% Mounted on
+ nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool
+
+Note that the filesystem still shows 2G of pages used, while there
+actually are no huge pages in use. The only way to 'fix' the filesystem
+accounting is to unmount the filesystem.
+
+If a hugetlb page is associated with an explicitly mounted filesystem,
+this information is contained in the page_private field. At migration
+time, this information is not preserved. To fix, simply transfer
+page_private from old to new page at migration time if necessary.
+
+There is a related race with removing a huge page from a file and
+migration. When a huge page is removed from the pagecache, the
+page_mapping() field is cleared, yet page_private remains set until the
+page is actually freed by free_huge_page(). A page could be migrated
+while in this state. However, since page_mapping() is not set, the
+hugetlbfs-specific routine to transfer page_private is not called and
+we leak the page count in the filesystem.
+
+To fix that, check for this condition before migrating a huge page. If
+the condition is detected, return -EBUSY for the page.
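+
+As a condensed sketch of the two migration-side changes (the hunks
+below are authoritative):
+
+  /* hugetlbfs_migrate_page(): carry the subpool pointer across. */
+  if (page_private(page)) {
+          set_page_private(newpage, page_private(page));
+          set_page_private(page, 0);
+  }
+
+  /* unmap_and_move_huge_page(): refuse pages being freed, i.e. with
+   * the mapping already cleared but the subpool pointer still set.
+   */
+  if (page_private(hpage) && !page_mapping(hpage))
+          rc = -EBUSY;    /* then goto out_unlock */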
+
+Link: http://lkml.kernel.org/r/74510272-7319-7372-9ea6-ec914734c179@oracle.com
+Link: http://lkml.kernel.org/r/20190212221400.3512-1-mike.kravetz@oracle.com
+Fixes: bcc54222309c ("mm: hugetlb: introduce page_huge_active")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: <stable@vger.kernel.org>
+[mike.kravetz@oracle.com: v2]
+ Link: http://lkml.kernel.org/r/7534d322-d782-8ac6-1c8d-a8dc380eb3ab@oracle.com
+[mike.kravetz@oracle.com: update comment and changelog]
+ Link: http://lkml.kernel.org/r/420bcfd6-158b-38e4-98da-26d0cd85bd01@oracle.com
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c | 12 ++++++++++++
+ mm/hugetlb.c | 16 +++++++++++++---
+ mm/migrate.c | 11 +++++++++++
+ 3 files changed, 36 insertions(+), 3 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -845,6 +845,18 @@ static int hugetlbfs_migrate_page(struct
+ rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
++
++ /*
++ * page_private is subpool pointer in hugetlb pages. Transfer to
++ * new page. PagePrivate is not associated with page_private for
++ * hugetlb pages and can not be set here as only page_huge_active
++ * pages can be migrated.
++ */
++ if (page_private(page)) {
++ set_page_private(newpage, page_private(page));
++ set_page_private(page, 0);
++ }
++
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3577,7 +3577,6 @@ retry_avoidcopy:
+ copy_user_huge_page(new_page, old_page, address, vma,
+ pages_per_huge_page(h));
+ __SetPageUptodate(new_page);
+- set_page_huge_active(new_page);
+
+ mmun_start = address & huge_page_mask(h);
+ mmun_end = mmun_start + huge_page_size(h);
+@@ -3600,6 +3599,7 @@ retry_avoidcopy:
+ make_huge_pte(vma, new_page, 1));
+ page_remove_rmap(old_page, true);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
++ set_page_huge_active(new_page);
+ /* Make the old page be freed below */
+ new_page = old_page;
+ }
+@@ -3682,6 +3682,7 @@ static int hugetlb_no_page(struct mm_str
+ struct page *page;
+ pte_t new_pte;
+ spinlock_t *ptl;
++ bool new_page = false;
+
+ /*
+ * Currently, we are forced to kill the process in the event the
+@@ -3747,7 +3748,7 @@ retry:
+ }
+ clear_huge_page(page, address, pages_per_huge_page(h));
+ __SetPageUptodate(page);
+- set_page_huge_active(page);
++ new_page = true;
+
+ if (vma->vm_flags & VM_MAYSHARE) {
+ int err = huge_add_to_page_cache(page, mapping, idx);
+@@ -3818,6 +3819,15 @@ retry:
+ }
+
+ spin_unlock(ptl);
++
++ /*
++ * Only make newly allocated pages active. Existing pages found
++ * in the pagecache could be !page_huge_active() if they have been
++ * isolated for migration.
++ */
++ if (new_page)
++ set_page_huge_active(page);
++
+ unlock_page(page);
+ out:
+ return ret;
+@@ -4053,7 +4063,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ * the set_pte_at() write.
+ */
+ __SetPageUptodate(page);
+- set_page_huge_active(page);
+
+ mapping = dst_vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+@@ -4121,6 +4130,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+ spin_unlock(ptl);
++ set_page_huge_active(page);
+ if (vm_shared)
+ unlock_page(page);
+ ret = 0;
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1303,6 +1303,16 @@ static int unmap_and_move_huge_page(new_
+ lock_page(hpage);
+ }
+
++ /*
++ * Check for pages which are in the process of being freed. Without
++ * page_mapping() set, hugetlbfs specific move page routine will not
++ * be called and we could leak usage counts for subpools.
++ */
++ if (page_private(hpage) && !page_mapping(hpage)) {
++ rc = -EBUSY;
++ goto out_unlock;
++ }
++
+ if (PageAnon(hpage))
+ anon_vma = page_get_anon_vma(hpage);
+
+@@ -1334,6 +1344,7 @@ put_anon:
+ set_page_owner_migrate_reason(new_hpage, reason);
+ }
+
++out_unlock:
+ unlock_page(hpage);
+ out:
+ if (rc != -EAGAIN)
--- /dev/null
+From 72faa7a773ca59336f3c889e878de81445c5a85c Mon Sep 17 00:00:00 2001
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+Date: Sat, 16 Feb 2019 17:12:24 +0800
+Subject: MIPS: irq: Allocate accurate order pages for irq stack
+
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+
+commit 72faa7a773ca59336f3c889e878de81445c5a85c upstream.
+
+irq_pages is the number of pages for the irq stack, not the
+allocation order that __get_free_pages() expects.
+We can use get_order() to calculate the correct order.
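+
+For example, assuming 4 KiB pages and a 16 KiB IRQ_STACK_SIZE (sizes
+are illustrative), the two expressions differ:
+
+  IRQ_STACK_SIZE / PAGE_SIZE  /* = 4; passed as an order this would
+                                 allocate 2^4 = 16 pages (64 KiB) */
+  get_order(IRQ_STACK_SIZE)   /* = 2; 2^2 pages = 4 pages = 16 KiB */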
+
+Signed-off-by: Liu Xiang <liu.xiang6@zte.com.cn>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Fixes: fe8bd18ffea5 ("MIPS: Introduce irq_stack")
+Cc: linux-mips@vger.kernel.org
+Cc: stable@vger.kernel.org # v4.11+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/irq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
+ void __init init_IRQ(void)
+ {
+ int i;
++ unsigned int order = get_order(IRQ_STACK_SIZE);
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+@@ -62,8 +63,7 @@ void __init init_IRQ(void)
+ arch_init_irq();
+
+ for_each_possible_cpu(i) {
+- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
net-phy-micrel-ksz8061-link-failure-after-cable-connect.patch
net-dsa-mv88e6xxx-fix-statistics-on-mv88e6161.patch
x86-cpu-amd-set-the-cpb-bit-unconditionally-on-f17h.patch
+applicom-fix-potential-spectre-v1-vulnerabilities.patch
+mips-irq-allocate-accurate-order-pages-for-irq-stack.patch
+hugetlbfs-fix-races-and-page-leaks-during-migration.patch
+xtensa-fix-get_wchan.patch
+bluetooth-fix-locking-in-bt_accept_enqueue-for-bh-context.patch
--- /dev/null
+From d90b88fd3653f1fb66ecc6571b860d5a5749fa56 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 2 Jan 2019 01:08:32 -0800
+Subject: xtensa: fix get_wchan
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit d90b88fd3653f1fb66ecc6571b860d5a5749fa56 upstream.
+
+Stack unwinding is implemented incorrectly in xtensa get_wchan:
+instead of extracting the a0 and a1 registers from the spill location
+under the stack pointer, it extracts the word the stack pointer points
+to and subtracts 4 or 3 from it.
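+
+For reference, the xtensa headers define the spill-area accessor
+along these lines (paraphrased from asm/stacktrace.h):
+
+  #define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+so SPILL_SLOT(sp, 0) loads the saved a0 (the return address) and
+SPILL_SLOT(sp, 1) the saved a1 (the caller's stack pointer) from the
+four-word spill area below sp, instead of doing integer arithmetic on
+the single word that sp points to.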
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/process.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -314,8 +314,8 @@ unsigned long get_wchan(struct task_stru
+
+ /* Stack layout: sp-4: ra, sp-3: sp' */
+
+- pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
+- sp = *(unsigned long *)sp - 3;
++ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++ sp = SPILL_SLOT(sp, 1);
+ } while (count++ < 16);
+ return 0;
+ }