--- /dev/null
+From 4b50bcc7eda4d3cc9e3f2a0aa60e590fedf728c5 Mon Sep 17 00:00:00 2001
+From: Stefan Bader <stefan.bader@canonical.com>
+Date: Fri, 20 May 2016 16:58:38 -0700
+Subject: mm: use phys_addr_t for reserve_bootmem_region() arguments
+
+From: Stefan Bader <stefan.bader@canonical.com>
+
+commit 4b50bcc7eda4d3cc9e3f2a0aa60e590fedf728c5 upstream.
+
+Since commit 92923ca3aace ("mm: meminit: only set page reserved in the
+memblock region") the reserved bit is set on reserved memblock regions.
+However, the start and end addresses are passed as unsigned long, which
+is only 32 bits wide on i386, so it can end up marking the wrong pages
+reserved for ranges at 4GB and above.
+
+This was observed on a 32bit Xen dom0 which was booted with initial
+memory set to a value below 4G but allowed to balloon in more memory
+(dom0_mem=1024M for example). This would define a reserved bootmem
+region for the additional memory (for example, on an 8GB system there
+was a reserved region covering the 4GB-8GB range). But since the
+addresses were passed on as unsigned long, this actually marked all
+pages from 0 to 4GB as reserved.
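+
+A minimal user-space sketch of the truncation (illustrative only:
+phys_addr_t is modelled as uint64_t and the 32bit i386 unsigned long as
+uint32_t):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  typedef uint64_t phys_addr_t;   /* 64-bit physical address (PAE)  */
+  typedef uint32_t i386_ulong;    /* models unsigned long on i386   */
+
+  #define PFN_DOWN(x) ((x) >> 12) /* PAGE_SHIFT == 12 */
+
+  int main(void)
+  {
+          phys_addr_t start = 0x100000000ULL;      /* region at 4GB  */
+          i386_ulong as_ulong = (i386_ulong)start; /* old prototype  */
+
+          /* intended start pfn: 1048576 */
+          printf("phys_addr_t:   pfn %llu\n",
+                 (unsigned long long)PFN_DOWN(start));
+          /* truncated start pfn: 0 -- pages from 0 get reserved */
+          printf("unsigned long: pfn %llu\n",
+                 (unsigned long long)PFN_DOWN(as_ulong));
+          return 0;
+  }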
+
+Fixes: 92923ca3aacef63 ("mm: meminit: only set page reserved in the memblock region")
+Link: http://lkml.kernel.org/r/1463491221-10573-1-git-send-email-stefan.bader@canonical.com
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mm.h | 2 +-
+ mm/page_alloc.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1696,7 +1696,7 @@ extern void free_highmem_page(struct pag
+ extern void adjust_managed_page_count(struct page *page, long count);
+ extern void mem_init_print_info(const char *str);
+
+-extern void reserve_bootmem_region(unsigned long start, unsigned long end);
++extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+
+ /* Free the reserved page into the buddy system, so it gets managed. */
+ static inline void __free_reserved_page(struct page *page)
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -951,7 +951,7 @@ static inline void init_reserved_page(un
+ * marks the pages PageReserved. The remaining valid pages are later
+ * sent to the buddy page allocator.
+ */
+-void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
++void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
+ {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long end_pfn = PFN_UP(end);
--- /dev/null
+From 759c01142a5d0f364a462346168a56de28a80f52 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 18 Jan 2016 16:36:09 +0100
+Subject: pipe: limit the per-user amount of pages allocated in pipes
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit 759c01142a5d0f364a462346168a56de28a80f52 upstream.
+
+On not-so-small systems, it is possible for a single process to cause an
+OOM condition by filling large pipes with data that is never read. A
+typical process filling 4000 pipes with 1 MB of data each will use 4 GB
+of memory. On small systems it may be tricky to set the pipe max size
+low enough to prevent this from happening.
+
+This patch makes it possible to enforce a per-user soft limit above
+which new pipes are limited to a single page, effectively limiting them
+to 4 kB each, as well as a hard limit above which no new pipes may be
+created for this user. This protects the system against memory abuse
+without hurting other users, while still allowing pipes to work
+correctly, though with less data at once.
+
+The limits are controlled by two new sysctls: pipe-user-pages-soft and
+pipe-user-pages-hard. Both may be disabled by setting them to zero. The
+default soft limit allows the default number of FDs per process (1024)
+to create pipes of the default size (64kB), thus reaching a limit of
+64MB before only smaller pipes start being created. With 256 processes
+limited to 1024 FDs each, this results in
+1024*64kB + (256*1024 - 1024) * 4kB = 1084 MB of memory allocated for a
+user. The hard limit is disabled by default to avoid breaking existing
+applications that make intensive use of pipes (e.g. for splicing).
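+
+For illustration, a hedged user-space sketch of what a process over the
+soft (or hard) limit and without CAP_SYS_RESOURCE/CAP_SYS_ADMIN would
+see when trying to grow a pipe (the exact outcome depends on the sysctl
+settings and current per-user usage):
+
+  #define _GNU_SOURCE
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <errno.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+          int fds[2];
+
+          if (pipe(fds))
+                  return 1;
+
+          /* Over pipe-user-pages-soft/-hard the grow request is denied
+           * with EPERM; new pipes are created with a single page. */
+          if (fcntl(fds[1], F_SETPIPE_SZ, 1048576) < 0)
+                  printf("F_SETPIPE_SZ: %s\n", strerror(errno));
+          else
+                  printf("pipe grown to %d bytes\n",
+                         fcntl(fds[1], F_GETPIPE_SZ));
+          return 0;
+  }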
+
+Reported-by: socketpair@gmail.com
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Mitigates: CVE-2013-4312 (Linux 2.0+)
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Moritz Muehlenhoff <moritz@wikimedia.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/sysctl/fs.txt | 23 +++++++++++++++++++++
+ fs/pipe.c | 47 ++++++++++++++++++++++++++++++++++++++++++--
+ include/linux/pipe_fs_i.h | 4 +++
+ include/linux/sched.h | 1
+ kernel/sysctl.c | 14 +++++++++++++
+ 5 files changed, 87 insertions(+), 2 deletions(-)
+
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/
+ - nr_open
+ - overflowuid
+ - overflowgid
++- pipe-user-pages-hard
++- pipe-user-pages-soft
+ - protected_hardlinks
+ - protected_symlinks
+ - suid_dumpable
+@@ -159,6 +161,27 @@ The default is 65534.
+
+ ==============================================================
+
++pipe-user-pages-hard:
++
++Maximum total number of pages a non-privileged user may allocate for pipes.
++Once this limit is reached, no new pipes may be allocated until usage goes
++below the limit again. When set to 0, no limit is applied, which is the default
++setting.
++
++==============================================================
++
++pipe-user-pages-soft:
++
++Maximum total number of pages a non-privileged user may allocate for pipes
++before the pipe size gets limited to a single page. Once this limit is reached,
++new pipes will be limited to a single page in size for this user in order to
++limit total memory usage, and trying to increase them using fcntl() will be
++denied until usage goes below the limit again. The default value allows to
++allocate up to 1024 pipes at their default size. When set to 0, no limit is
++applied.
++
++==============================================================
++
+ protected_hardlinks:
+
+ A long-standing class of security issues is the hardlink-based
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -38,6 +38,12 @@ unsigned int pipe_max_size = 1048576;
+ */
+ unsigned int pipe_min_size = PAGE_SIZE;
+
++/* Maximum allocatable pages per user. Hard limit is unset by default, soft
++ * matches default values.
++ */
++unsigned long pipe_user_pages_hard;
++unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
++
+ /*
+ * We use a start+len construction, which provides full use of the
+ * allocated memory.
+@@ -583,20 +589,49 @@ pipe_fasync(int fd, struct file *filp, i
+ return retval;
+ }
+
++static void account_pipe_buffers(struct pipe_inode_info *pipe,
++ unsigned long old, unsigned long new)
++{
++ atomic_long_add(new - old, &pipe->user->pipe_bufs);
++}
++
++static bool too_many_pipe_buffers_soft(struct user_struct *user)
++{
++ return pipe_user_pages_soft &&
++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
++}
++
++static bool too_many_pipe_buffers_hard(struct user_struct *user)
++{
++ return pipe_user_pages_hard &&
++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
++}
++
+ struct pipe_inode_info *alloc_pipe_info(void)
+ {
+ struct pipe_inode_info *pipe;
+
+ pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
+ if (pipe) {
+- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
++ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
++ struct user_struct *user = get_current_user();
++
++ if (!too_many_pipe_buffers_hard(user)) {
++ if (too_many_pipe_buffers_soft(user))
++ pipe_bufs = 1;
++ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
++ }
++
+ if (pipe->bufs) {
+ init_waitqueue_head(&pipe->wait);
+ pipe->r_counter = pipe->w_counter = 1;
+- pipe->buffers = PIPE_DEF_BUFFERS;
++ pipe->buffers = pipe_bufs;
++ pipe->user = user;
++ account_pipe_buffers(pipe, 0, pipe_bufs);
+ mutex_init(&pipe->mutex);
+ return pipe;
+ }
++ free_uid(user);
+ kfree(pipe);
+ }
+
+@@ -607,6 +642,8 @@ void free_pipe_info(struct pipe_inode_in
+ {
+ int i;
+
++ account_pipe_buffers(pipe, pipe->buffers, 0);
++ free_uid(pipe->user);
+ for (i = 0; i < pipe->buffers; i++) {
+ struct pipe_buffer *buf = pipe->bufs + i;
+ if (buf->ops)
+@@ -998,6 +1035,7 @@ static long pipe_set_size(struct pipe_in
+ memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
+ }
+
++ account_pipe_buffers(pipe, pipe->buffers, nr_pages);
+ pipe->curbuf = 0;
+ kfree(pipe->bufs);
+ pipe->bufs = bufs;
+@@ -1069,6 +1107,11 @@ long pipe_fcntl(struct file *file, unsig
+ if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
+ ret = -EPERM;
+ goto out;
++ } else if ((too_many_pipe_buffers_hard(pipe->user) ||
++ too_many_pipe_buffers_soft(pipe->user)) &&
++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
++ ret = -EPERM;
++ goto out;
+ }
+ ret = pipe_set_size(pipe, nr_pages);
+ break;
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -42,6 +42,7 @@ struct pipe_buffer {
+ * @fasync_readers: reader side fasync
+ * @fasync_writers: writer side fasync
+ * @bufs: the circular array of pipe buffers
++ * @user: the user who created this pipe
+ **/
+ struct pipe_inode_info {
+ struct mutex mutex;
+@@ -57,6 +58,7 @@ struct pipe_inode_info {
+ struct fasync_struct *fasync_readers;
+ struct fasync_struct *fasync_writers;
+ struct pipe_buffer *bufs;
++ struct user_struct *user;
+ };
+
+ /*
+@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info
+ void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
+
+ extern unsigned int pipe_max_size, pipe_min_size;
++extern unsigned long pipe_user_pages_hard;
++extern unsigned long pipe_user_pages_soft;
+ int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -831,6 +831,7 @@ struct user_struct {
+ #endif
+ unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
++ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
+
+ #ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1735,6 +1735,20 @@ static struct ctl_table fs_table[] = {
+ .proc_handler = &pipe_proc_fn,
+ .extra1 = &pipe_min_size,
+ },
++ {
++ .procname = "pipe-user-pages-hard",
++ .data = &pipe_user_pages_hard,
++ .maxlen = sizeof(pipe_user_pages_hard),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
++ {
++ .procname = "pipe-user-pages-soft",
++ .data = &pipe_user_pages_soft,
++ .maxlen = sizeof(pipe_user_pages_soft),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ { }
+ };
+
--- /dev/null
+From 11ca2b7ab432eb90906168c327733575e68d388f Mon Sep 17 00:00:00 2001
+From: Zhao Qiang <qiang.zhao@nxp.com>
+Date: Wed, 9 Mar 2016 09:48:11 +0800
+Subject: QE-UART: add "fsl,t1040-ucc-uart" to of_device_id
+
+From: Zhao Qiang <qiang.zhao@nxp.com>
+
+commit 11ca2b7ab432eb90906168c327733575e68d388f upstream.
+
+New bindings use "fsl,t1040-ucc-uart" as the compatible string for the
+QE UART, so add it to the driver's of_device_id table.
+
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/ucc_uart.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -1478,6 +1478,9 @@ static const struct of_device_id ucc_uar
+ .type = "serial",
+ .compatible = "ucc_uart",
+ },
++ {
++ .compatible = "fsl,t1040-ucc-uart",
++ },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ucc_uart_match);
pinctrl-exynos5440-use-off-stack-memory-for-pinctrl_gpio_range.patch
pci-disable-all-bar-sizing-for-devices-with-non-compliant-bars.patch
media-v4l2-compat-ioctl32-fix-missing-reserved-field-copy-in-put_v4l2_create32.patch
+mm-use-phys_addr_t-for-reserve_bootmem_region-arguments.patch
+wait-ptrace-assume-__wall-if-the-child-is-traced.patch
+qe-uart-add-fsl-t1040-ucc-uart-to-of_device_id.patch
+usbvision-fix-overflow-of-interfaces-array.patch
+pipe-limit-the-per-user-amount-of-pages-allocated-in-pipes.patch
--- /dev/null
+From 588afcc1c0e45358159090d95bf7b246fb67565f Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.com>
+Date: Tue, 27 Oct 2015 09:51:34 -0200
+Subject: [media] usbvision fix overflow of interfaces array
+
+From: Oliver Neukum <oneukum@suse.com>
+
+commit 588afcc1c0e45358159090d95bf7b246fb67565f upstream.
+
+This fixes the crash reported in:
+http://seclists.org/bugtraq/2015/Oct/35
+The interface number needs a sanity check.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Cc: Vladis Dronov <vdronov@redhat.com>
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Cc: Moritz Muehlenhoff <moritz@wikimedia.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/usbvision/usbvision-video.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1461,6 +1461,13 @@ static int usbvision_probe(struct usb_in
+ printk(KERN_INFO "%s: %s found\n", __func__,
+ usbvision_device_data[model].model_string);
+
++ /*
++ * this is a security check.
++ * an exploit using an incorrect bInterfaceNumber is known
++ */
++ if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
++ return -ENODEV;
++
+ if (usbvision_device_data[model].interface >= 0)
+ interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ else if (ifnum < dev->actconfig->desc.bNumInterfaces)
--- /dev/null
+From bf959931ddb88c4e4366e96dd22e68fa0db9527c Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 23 May 2016 16:23:50 -0700
+Subject: wait/ptrace: assume __WALL if the child is traced
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit bf959931ddb88c4e4366e96dd22e68fa0db9527c upstream.
+
+The following program (simplified version of generated by syzkaller)
+
+ #include <pthread.h>
+ #include <unistd.h>
+ #include <sys/ptrace.h>
+ #include <stdio.h>
+ #include <signal.h>
+
+ void *thread_func(void *arg)
+ {
+ ptrace(PTRACE_TRACEME, 0,0,0);
+ return 0;
+ }
+
+ int main(void)
+ {
+ pthread_t thread;
+
+ if (fork())
+ return 0;
+
+ while (getppid() != 1)
+ ;
+
+ pthread_create(&thread, NULL, thread_func, NULL);
+ pthread_join(thread, NULL);
+ return 0;
+ }
+
+creates an unreapable zombie if /sbin/init doesn't use __WALL.
+
+This is not a kernel bug, at least in the sense that everything works as
+expected: the debugger should reap a traced sub-thread before it can
+reap the leader, but without __WALL/__WCLONE, do_wait() ignores
+sub-threads.
+
+Unfortunately, it seems that /sbin/init in most (all?) distributions
+doesn't use it, and we have to change the kernel to avoid the problem.
+Note also that most inits use sys_waitid(), which doesn't allow __WALL,
+so the necessary user-space fix is not that trivial.
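+
+For reference, a hedged sketch of the waitpid()-based reaping a tracer
+(or an init that uses waitpid() rather than waitid()) would need without
+this patch -- passing __WALL so traced sub-threads whose exit_signal is
+not SIGCHLD are not skipped:
+
+  #define _GNU_SOURCE
+  #include <sys/types.h>
+  #include <sys/wait.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int status;
+          pid_t pid;
+
+          /* __WALL: also return clone children / traced sub-threads;
+           * without it (or __WCLONE) they linger as zombies. */
+          while ((pid = waitpid(-1, &status, __WALL)) > 0)
+                  printf("reaped %d\n", (int)pid);
+          return 0;
+  }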
+
+This patch just adds a "ptrace" check to eligible_child(). To some
+degree this matches the "tsk->ptrace" check in exit_notify():
+->exit_signal is mostly ignored when the tracee reports to the debugger.
+Or consider WSTOPPED: the tracer doesn't need to set this flag to wait
+for a stopped tracee.
+
+This obviously means a user-visible change: __WCLONE and __WALL no
+longer have any meaning for a debugger. I can only hope that this won't
+break anything, but at least strace/gdb won't suffer.
+
+We could make a more conservative change. Say, we could take __WCLONE
+into account, or check !thread_group_leader(). But it would be nice not
+to complicate these historical/confusing checks.
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Jan Kratochvil <jan.kratochvil@redhat.com>
+Cc: "Michael Kerrisk (man-pages)" <mtk.manpages@gmail.com>
+Cc: Pedro Alves <palves@redhat.com>
+Cc: Roland McGrath <roland@hack.frob.com>
+Cc: <syzkaller@googlegroups.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/exit.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -918,17 +918,28 @@ static int eligible_pid(struct wait_opts
+ task_pid_type(p, wo->wo_type) == wo->wo_pid;
+ }
+
+-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
++static int
++eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
+ {
+ if (!eligible_pid(wo, p))
+ return 0;
+- /* Wait for all children (clone and not) if __WALL is set;
+- * otherwise, wait for clone children *only* if __WCLONE is
+- * set; otherwise, wait for non-clone children *only*. (Note:
+- * A "clone" child here is one that reports to its parent
+- * using a signal other than SIGCHLD.) */
+- if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+- && !(wo->wo_flags & __WALL))
++
++ /*
++ * Wait for all children (clone and not) if __WALL is set or
++ * if it is traced by us.
++ */
++ if (ptrace || (wo->wo_flags & __WALL))
++ return 1;
++
++ /*
++ * Otherwise, wait for clone children *only* if __WCLONE is set;
++ * otherwise, wait for non-clone children *only*.
++ *
++ * Note: a "clone" child here is one that reports to its parent
++ * using a signal other than SIGCHLD, or a non-leader thread which
++ * we can only see if it is traced by us.
++ */
++ if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+ return 0;
+
+ return 1;
+@@ -1301,7 +1312,7 @@ static int wait_consider_task(struct wai
+ if (unlikely(exit_state == EXIT_DEAD))
+ return 0;
+
+- ret = eligible_child(wo, p);
++ ret = eligible_child(wo, ptrace, p);
+ if (!ret)
+ return ret;
+