--- /dev/null
+From 325c50e3cebb9208009083e841550f98a863bfa0 Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Wed, 21 Sep 2016 08:31:29 -0400
+Subject: btrfs: ensure that file descriptor used with subvol ioctls is a dir
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit 325c50e3cebb9208009083e841550f98a863bfa0 upstream.
+
+If the subvol/snapshot create/destroy ioctls are passed a regular file
+with execute permissions set, we'll eventually Oops while trying to do
+inode->i_op->lookup via lookup_one_len.
+
+This patch ensures that the file descriptor refers to a directory.
+
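+As an illustration (not part of the upstream commit), a minimal userspace
+sketch of the failure mode; the mount point and file name are
+hypothetical:
+
+	/* build: gcc -o repro repro.c; run against a btrfs mount */
+	#include <fcntl.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <linux/btrfs.h>
+
+	int main(void)
+	{
+		/* a regular file with the exec bit set, NOT a directory */
+		int fd = open("/mnt/btrfs/regular-file", O_RDONLY);
+		struct btrfs_ioctl_vol_args args = { .fd = 0 };
+
+		strcpy(args.name, "subvol");
+		/* without this patch the kernel may Oops in lookup_one_len();
+		 * with it, the ioctl fails cleanly with -ENOTDIR */
+		return ioctl(fd, BTRFS_IOC_SUBVOL_CREATE, &args);
+	}
+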
+Fixes: cb8e70901d (Btrfs: Fix subvolume creation locking rules)
+Fixes: 76dda93c6a (Btrfs: add snapshot/subvolume destroy ioctl)
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ioctl.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_cre
+ int namelen;
+ int ret = 0;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
+@@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_cre
+ struct btrfs_ioctl_vol_args *vol_args;
+ int ret;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+@@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_cre
+ bool readonly = false;
+ struct btrfs_qgroup_inherit *inherit = NULL;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+@@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_des
+ int ret;
+ int err = 0;
+
++ if (!S_ISDIR(dir->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
--- /dev/null
+From 4de349e786a3a2d51bd02d56f3de151bbc3c3df9 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <fabio.estevam@nxp.com>
+Date: Wed, 17 Aug 2016 12:41:08 -0300
+Subject: can: flexcan: fix resume function
+
+From: Fabio Estevam <fabio.estevam@nxp.com>
+
+commit 4de349e786a3a2d51bd02d56f3de151bbc3c3df9 upstream.
+
+On an imx6ul-pico board the following error is seen during a system
+suspend/resume cycle:
+
+dpm_run_callback(): platform_pm_resume+0x0/0x54 returns -110
+PM: Device 2090000.flexcan failed to resume: error -110
+
+The reason for this error is that when the CAN interface is not active,
+the clocks are disabled, so flexcan_chip_enable() will always fail with
+a timeout error.
+
+In order to fix this issue, only call flexcan_chip_enable/disable()
+when the CAN interface is active.
+
+Based on a patch from Dong Aisheng in the NXP kernel.
+
+Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/flexcan.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspen
+ struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
+
+- err = flexcan_chip_disable(priv);
+- if (err)
+- return err;
+-
+ if (netif_running(dev)) {
++ err = flexcan_chip_disable(priv);
++ if (err)
++ return err;
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
++ int err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
++ err = flexcan_chip_enable(priv);
++ if (err)
++ return err;
+ }
+- return flexcan_chip_enable(priv);
++ return 0;
+ }
+
+ static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
--- /dev/null
+From 96d41019e3ac55f6f0115b0ce97e4f24a3d636d2 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 19 Sep 2016 14:44:30 -0700
+Subject: fanotify: fix list corruption in fanotify_get_response()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 96d41019e3ac55f6f0115b0ce97e4f24a3d636d2 upstream.
+
+fanotify_get_response() calls fsnotify_remove_event() when it finds that
+the group is being released from fanotify_release() (bypass_perm is set).
+
+However, the event it removes need not still be in the group's
+notification queue; it may have already moved to access_list (if
+userspace read the event before closing the fanotify instance fd), which
+is protected by a different lock. Thus when fsnotify_remove_event()
+races with fanotify_release() operating on access_list, the list can get
+corrupted.
+
+Fix the problem by moving all the logic that removes permission events
+from the lists into one place: fanotify_release().
+
+Fixes: 5838d4442bd5 ("fanotify: fix double free of pending permission events")
+Link: http://lkml.kernel.org/r/1473797711-14111-3-git-send-email-jack@suse.cz
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reported-by: Miklos Szeredi <mszeredi@redhat.com>
+Tested-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fanotify/fanotify.c | 13 +------------
+ fs/notify/fanotify/fanotify_user.c | 36 ++++++++++++++++++++++++------------
+ fs/notify/notification.c | 15 ---------------
+ include/linux/fsnotify_backend.h | 3 ---
+ 4 files changed, 25 insertions(+), 42 deletions(-)
+
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -67,18 +67,7 @@ static int fanotify_get_response(struct
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+- wait_event(group->fanotify_data.access_waitq, event->response ||
+- atomic_read(&group->fanotify_data.bypass_perm));
+-
+- if (!event->response) { /* bypass_perm set */
+- /*
+- * Event was canceled because group is being destroyed. Remove
+- * it from group's event list because we are responsible for
+- * freeing the permission event.
+- */
+- fsnotify_remove_event(group, &event->fae.fse);
+- return 0;
+- }
++ wait_event(group->fanotify_data.access_waitq, event->response);
+
+ /* userspace responded, convert to something usable */
+ switch (event->response) {
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -358,16 +358,20 @@ static int fanotify_release(struct inode
+
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_perm_event_info *event, *next;
++ struct fsnotify_event *fsn_event;
+
+ /*
+- * There may be still new events arriving in the notification queue
+- * but since userspace cannot use fanotify fd anymore, no event can
+- * enter or leave access_list by now.
++ * Stop new events from arriving in the notification queue. since
++ * userspace cannot use fanotify fd anymore, no event can enter or
++ * leave access_list by now either.
+ */
+- spin_lock(&group->fanotify_data.access_lock);
+-
+- atomic_inc(&group->fanotify_data.bypass_perm);
++ fsnotify_group_stop_queueing(group);
+
++ /*
++ * Process all permission events on access_list and notification queue
++ * and simulate reply from userspace.
++ */
++ spin_lock(&group->fanotify_data.access_lock);
+ list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
+ fae.fse.list) {
+ pr_debug("%s: found group=%p event=%p\n", __func__, group,
+@@ -379,12 +383,21 @@ static int fanotify_release(struct inode
+ spin_unlock(&group->fanotify_data.access_lock);
+
+ /*
+- * Since bypass_perm is set, newly queued events will not wait for
+- * access response. Wake up the already sleeping ones now.
+- * synchronize_srcu() in fsnotify_destroy_group() will wait for all
+- * processes sleeping in fanotify_handle_event() waiting for access
+- * response and thus also for all permission events to be freed.
++ * Destroy all non-permission events. For permission events just
++ * dequeue them and set the response. They will be freed once the
++ * response is consumed and fanotify_get_response() returns.
+ */
++ mutex_lock(&group->notification_mutex);
++ while (!fsnotify_notify_queue_is_empty(group)) {
++ fsn_event = fsnotify_remove_first_event(group);
++ if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
++ fsnotify_destroy_event(group, fsn_event);
++ else
++ FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
++ }
++ mutex_unlock(&group->notification_mutex);
++
++ /* Response for all permission events it set, wakeup waiters */
+ wake_up(&group->fanotify_data.access_waitq);
+ #endif
+
+@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned
+ spin_lock_init(&group->fanotify_data.access_lock);
+ init_waitqueue_head(&group->fanotify_data.access_waitq);
+ INIT_LIST_HEAD(&group->fanotify_data.access_list);
+- atomic_set(&group->fanotify_data.bypass_perm, 0);
+ #endif
+ switch (flags & FAN_ALL_CLASS_BITS) {
+ case FAN_CLASS_NOTIF:
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -132,21 +132,6 @@ queue:
+ }
+
+ /*
+- * Remove @event from group's notification queue. It is the responsibility of
+- * the caller to destroy the event.
+- */
+-void fsnotify_remove_event(struct fsnotify_group *group,
+- struct fsnotify_event *event)
+-{
+- mutex_lock(&group->notification_mutex);
+- if (!list_empty(&event->list)) {
+- list_del_init(&event->list);
+- group->q_len--;
+- }
+- mutex_unlock(&group->notification_mutex);
+-}
+-
+-/*
+ * Remove and return the first event from the notification list. It is the
+ * responsibility of the caller to destroy the obtained event
+ */
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -180,7 +180,6 @@ struct fsnotify_group {
+ spinlock_t access_lock;
+ struct list_head access_list;
+ wait_queue_head_t access_waitq;
+- atomic_t bypass_perm;
+ #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
+ int f_flags;
+ unsigned int max_marks;
+@@ -323,8 +322,6 @@ extern int fsnotify_add_event(struct fsn
+ struct fsnotify_event *event,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *));
+-/* Remove passed event from groups notification queue */
+-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
+ /* true if the group notification queue is empty */
+ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+ /* return, but do not dequeue the first event on the notification queue */
--- /dev/null
+From e23d4159b109167126e5bcd7f3775c95de7fee47 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@ZenIV.linux.org.uk>
+Date: Tue, 20 Sep 2016 20:07:42 +0100
+Subject: fix fault_in_multipages_...() on architectures with no-op access_ok()
+
+From: Al Viro <viro@ZenIV.linux.org.uk>
+
+commit e23d4159b109167126e5bcd7f3775c95de7fee47 upstream.
+
+Switching iov_iter fault-in to the multipages variants has exposed an
+old bug in the underlying fault_in_multipages_...() helpers: they break
+if the range passed to them wraps around. Normally the access_ok() done
+by callers will prevent such a range (and it's a guaranteed EFAULT -
+ERR_PTR() values fall into such a range and they should not point to
+any valid objects).
+
+However, on architectures where userland and kernel live in different
+MMU contexts (e.g. s390) access_ok() is a no-op and on those a range
+with a wraparound can reach fault_in_multipages_...().
+
+Since any wraparound means EFAULT there, the fix is trivial - turn
+those
+
+ while (uaddr <= end)
+ ...
+into
+
+ if (unlikely(uaddr > end))
+ return -EFAULT;
+ do
+ ...
+ while (uaddr <= end);
+
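+As a worked illustration (numbers chosen for this note, not taken from
+the report): on a 64-bit kernel with a no-op access_ok(), uaddr ==
+0xfffffffffffff000 and size == 0x2000 give end = uaddr + size - 1 ==
+0xfff, which has wrapped below uaddr. The old "while (uaddr <= end)"
+loop therefore never runs, the "spilled into the next page" check fails
+because uaddr and end sit in different pages, and the function returns
+0, claiming success without having faulted anything in. With the
+explicit wraparound check the same call returns -EFAULT instead.
+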
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Tested-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/pagemap.h | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -601,56 +601,56 @@ static inline int fault_in_pages_readabl
+ */
+ static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+ {
+- int ret = 0;
+ char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+- return ret;
++ return 0;
+
++ if (unlikely(uaddr > end))
++ return -EFAULT;
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+- while (uaddr <= end) {
+- ret = __put_user(0, uaddr);
+- if (ret != 0)
+- return ret;
++ do {
++ if (unlikely(__put_user(0, uaddr) != 0))
++ return -EFAULT;
+ uaddr += PAGE_SIZE;
+- }
++ } while (uaddr <= end);
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK))
+- ret = __put_user(0, end);
++ return __put_user(0, end);
+
+- return ret;
++ return 0;
+ }
+
+ static inline int fault_in_multipages_readable(const char __user *uaddr,
+ int size)
+ {
+ volatile char c;
+- int ret = 0;
+ const char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+- return ret;
++ return 0;
++
++ if (unlikely(uaddr > end))
++ return -EFAULT;
+
+- while (uaddr <= end) {
+- ret = __get_user(c, uaddr);
+- if (ret != 0)
+- return ret;
++ do {
++ if (unlikely(__get_user(c, uaddr) != 0))
++ return -EFAULT;
+ uaddr += PAGE_SIZE;
+- }
++ } while (uaddr <= end);
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK)) {
+- ret = __get_user(c, end);
+- (void)c;
++ return __get_user(c, end);
+ }
+
+- return ret;
++ return 0;
+ }
+
+ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
--- /dev/null
+From 1ae2293dd6d2f5c823cf97e60b70d03631cd622f Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sat, 17 Sep 2016 18:31:46 -0400
+Subject: fix memory leaks in tracing_buffers_splice_read()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 1ae2293dd6d2f5c823cf97e60b70d03631cd622f upstream.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5766,9 +5766,6 @@ tracing_buffers_splice_read(struct file
+ return -EBUSY;
+ #endif
+
+- if (splice_grow_spd(pipe, &spd))
+- return -ENOMEM;
+-
+ if (*ppos & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+@@ -5778,6 +5775,9 @@ tracing_buffers_splice_read(struct file
+ len &= PAGE_MASK;
+ }
+
++ if (splice_grow_spd(pipe, &spd))
++ return -ENOMEM;
++
+ again:
+ trace_access_lock(iter->cpu_file);
+ entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+@@ -5835,19 +5835,21 @@ tracing_buffers_splice_read(struct file
+ /* did we read anything? */
+ if (!spd.nr_pages) {
+ if (ret)
+- return ret;
++ goto out;
+
++ ret = -EAGAIN;
+ if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+- return -EAGAIN;
++ goto out;
+
+ ret = wait_on_pipe(iter, true);
+ if (ret)
+- return ret;
++ goto out;
+
+ goto again;
+ }
+
+ ret = splice_to_pipe(pipe, &spd);
++out:
+ splice_shrink_spd(&spd);
+
+ return ret;
--- /dev/null
+From 12703dbfeb15402260e7554d32a34ac40c233990 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 19 Sep 2016 14:44:27 -0700
+Subject: fsnotify: add a way to stop queueing events on group shutdown
+
+From: Jan Kara <jack@suse.cz>
+
+commit 12703dbfeb15402260e7554d32a34ac40c233990 upstream.
+
+Implement a function that can be called when a group is being shut down
+to stop queueing new events to the group. Fanotify will use this.
+
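+A sketch of the intended call pattern (as used by fsnotify_destroy_group()
+in this patch and by fanotify_release() in the follow-up fix):
+
+	fsnotify_group_stop_queueing(group);	/* no new events after this */
+	/* drain or answer the events that are already queued ... */
+	fsnotify_destroy_group(group);
+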
+Fixes: 5838d4442bd5 ("fanotify: fix double free of pending permission events")
+Link: http://lkml.kernel.org/r/1473797711-14111-2-git-send-email-jack@suse.cz
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/group.c | 19 +++++++++++++++++++
+ fs/notify/notification.c | 8 +++++++-
+ include/linux/fsnotify_backend.h | 3 +++
+ 3 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/fs/notify/group.c
++++ b/fs/notify/group.c
+@@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group
+ }
+
+ /*
++ * Stop queueing new events for this group. Once this function returns
++ * fsnotify_add_event() will not add any new events to the group's queue.
++ */
++void fsnotify_group_stop_queueing(struct fsnotify_group *group)
++{
++ mutex_lock(&group->notification_mutex);
++ group->shutdown = true;
++ mutex_unlock(&group->notification_mutex);
++}
++
++/*
+ * Trying to get rid of a group. Remove all marks, flush all events and release
+ * the group reference.
+ * Note that another thread calling fsnotify_clear_marks_by_group() may still
+@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group
+ */
+ void fsnotify_destroy_group(struct fsnotify_group *group)
+ {
++ /*
++ * Stop queueing new events. The code below is careful enough to not
++ * require this but fanotify needs to stop queuing events even before
++ * fsnotify_destroy_group() is called and this makes the other callers
++ * of fsnotify_destroy_group() to see the same behavior.
++ */
++ fsnotify_group_stop_queueing(group);
++
+ /* clear all inode marks for this group */
+ fsnotify_clear_marks_by_group(group);
+
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnot
+ * Add an event to the group notification queue. The group can later pull this
+ * event off the queue to deal with. The function returns 0 if the event was
+ * added to the queue, 1 if the event was merged with some other queued event,
+- * 2 if the queue of events has overflown.
++ * 2 if the event was not queued - either the queue of events has overflown
++ * or the group is shutting down.
+ */
+ int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_g
+
+ mutex_lock(&group->notification_mutex);
+
++ if (group->shutdown) {
++ mutex_unlock(&group->notification_mutex);
++ return 2;
++ }
++
+ if (group->q_len >= group->max_events) {
+ ret = 2;
+ /* Queue overflow event only if it isn't already queued */
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -148,6 +148,7 @@ struct fsnotify_group {
+ #define FS_PRIO_1 1 /* fanotify content based access control */
+ #define FS_PRIO_2 2 /* fanotify pre-content access */
+ unsigned int priority;
++ bool shutdown; /* group is being shut down, don't queue more events */
+
+ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ struct mutex mark_mutex; /* protect marks_list */
+@@ -308,6 +309,8 @@ extern struct fsnotify_group *fsnotify_a
+ extern void fsnotify_get_group(struct fsnotify_group *group);
+ /* drop reference on a group from fsnotify_alloc_group */
+ extern void fsnotify_put_group(struct fsnotify_group *group);
++/* group destruction begins, stop queuing new events */
++extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
+ /* destroy group */
+ extern void fsnotify_destroy_group(struct fsnotify_group *group);
+ /* fasync handler function */
--- /dev/null
+From 371a015344b6e270e7e3632107d9554ec6d27a6b Mon Sep 17 00:00:00 2001
+From: "Yadi.hu" <yadi.hu@windriver.com>
+Date: Sun, 18 Sep 2016 18:52:31 +0800
+Subject: i2c-eg20t: fix race between i2c init and interrupt enable
+
+From: Yadi.hu <yadi.hu@windriver.com>
+
+commit 371a015344b6e270e7e3632107d9554ec6d27a6b upstream.
+
+The eg20t driver calls request_irq() before pch_base_address, the base
+address of the i2c controller's registers, is assigned an effective
+value.
+
+One possible scenario is that an interrupt not intended for the eg20t
+arrives immediately after request_irq() is executed, when the i2c
+controller shares an interrupt line with other devices. Since the
+interrupt handler pch_i2c_handler() is already registered as a shared
+action, it will be called and will read its own registers to determine
+whether the interrupt came from itself.
+
+At that moment, since the base address of the i2c registers has not yet
+been remapped into kernel space, the interrupt handler will access an
+invalid address and an error occurs.
+
+Signed-off-by: Yadi.hu <yadi.hu@windriver.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-eg20t.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev
+ /* Set the number of I2C channel instance */
+ adap_info->ch_num = id->driver_data;
+
+- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+- KBUILD_MODNAME, adap_info);
+- if (ret) {
+- pch_pci_err(pdev, "request_irq FAILED\n");
+- goto err_request_irq;
+- }
+-
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_adap = &adap_info->pch_data[i].pch_adapter;
+ adap_info->pch_i2c_suspended = false;
+@@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev
+ adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
+
+ pch_adap->dev.parent = &pdev->dev;
++ }
++
++ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
++ KBUILD_MODNAME, adap_info);
++ if (ret) {
++ pch_pci_err(pdev, "request_irq FAILED\n");
++ goto err_request_irq;
++ }
++
++ for (i = 0; i < adap_info->ch_num; i++) {
++ pch_adap = &adap_info->pch_data[i].pch_adapter;
+
+ pch_i2c_init(&adap_info->pch_data[i]);
+
--- /dev/null
+From 331dcf421c34d227784d07943eb01e4023a42b0a Mon Sep 17 00:00:00 2001
+From: Sudeep Holla <Sudeep.Holla@arm.com>
+Date: Thu, 25 Aug 2016 12:23:39 +0100
+Subject: i2c: qup: skip qup_i2c_suspend if the device is already runtime suspended
+
+From: Sudeep Holla <Sudeep.Holla@arm.com>
+
+commit 331dcf421c34d227784d07943eb01e4023a42b0a upstream.
+
+If the i2c device is already runtime suspended and qup_i2c_suspend is
+executed during suspend-to-idle or suspend-to-ram, it will result in the
+following splat:
+
+WARNING: CPU: 3 PID: 1593 at drivers/clk/clk.c:476 clk_core_unprepare+0x80/0x90
+Modules linked in:
+
+CPU: 3 PID: 1593 Comm: bash Tainted: G W 4.8.0-rc3 #14
+Hardware name: Qualcomm Technologies, Inc. APQ 8016 SBC (DT)
+PC is at clk_core_unprepare+0x80/0x90
+LR is at clk_unprepare+0x28/0x40
+pc : [<ffff0000086eecf0>] lr : [<ffff0000086f0c58>] pstate: 60000145
+Call trace:
+ clk_core_unprepare+0x80/0x90
+ qup_i2c_disable_clocks+0x2c/0x68
+ qup_i2c_suspend+0x10/0x20
+ platform_pm_suspend+0x24/0x68
+ ...
+
+This patch fixes the issue by executing qup_i2c_pm_suspend_runtime
+conditionally in qup_i2c_suspend.
+
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Reviewed-by: Andy Gross <andy.gross@linaro.org>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-qup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(str
+ #ifdef CONFIG_PM_SLEEP
+ static int qup_i2c_suspend(struct device *device)
+ {
+- qup_i2c_pm_suspend_runtime(device);
++ if (!pm_runtime_suspended(device))
++ return qup_i2c_pm_suspend_runtime(device);
+ return 0;
+ }
+
--- /dev/null
+From 3cbc6fc9c99f1709203711f125bc3b79487aba06 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Mon, 5 Sep 2016 08:48:03 +0800
+Subject: MIPS: Add a missing ".set pop" in an early commit
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 3cbc6fc9c99f1709203711f125bc3b79487aba06 upstream.
+
+Commit 842dfc11ea9a21 ("MIPS: Fix build with binutils 2.24.51+") is
+missing a ".set pop" in the fpu_restore_16even macro, so add it.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Acked-by: Manuel Lauss <manuel.lauss@gmail.com>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14210/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/asmmacro.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -135,6 +135,7 @@
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
+ ctc1 \tmp, fcr31
++ .set pop
+ .endm
+
+ .macro fpu_restore_16odd thread
--- /dev/null
+From b244614a60ab7ce54c12a9cbe15cfbf8d79d0967 Mon Sep 17 00:00:00 2001
+From: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+Date: Wed, 31 Aug 2016 12:33:23 +0200
+Subject: MIPS: Avoid a BUG warning during prctl(PR_SET_FP_MODE, ...)
+
+From: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+
+commit b244614a60ab7ce54c12a9cbe15cfbf8d79d0967 upstream.
+
+The cpu_has_fpu macro uses smp_processor_id() and is currently executed
+with preemption enabled, which triggers a warning at runtime.
+
+It is assumed throughout the kernel that if any CPU has an FPU, then all
+CPUs have an FPU as well, so it is safe to perform the check with
+preemption enabled - change the code to use the raw_ variant of the
+check to avoid the warning.
+
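+For reference, the distinction is roughly the following (paraphrased
+from asm/cpu-features.h and asm/cpu-info.h; exact definitions vary
+between kernel versions):
+
+	/* indexes cpu_data[] with smp_processor_id(), which warns when
+	 * called with preemption enabled under CONFIG_DEBUG_PREEMPT */
+	#define cpu_has_fpu	(current_cpu_data.options & MIPS_CPU_FPU)
+	/* indexes cpu_data[] with raw_smp_processor_id(), no such check */
+	#define raw_cpu_has_fpu	(raw_current_cpu_data.options & MIPS_CPU_FPU)
+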
+Signed-off-by: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14125/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/process.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task
+ return -EOPNOTSUPP;
+
+ /* Avoid inadvertently triggering emulation */
+- if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+- !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
++ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
++ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ return -EOPNOTSUPP;
+- if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
++ if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ return -EOPNOTSUPP;
+
+ /* FR = 0 not supported in MIPS R6 */
+- if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
++ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ return -EOPNOTSUPP;
+
+ /* Proceed with the mode switch */
--- /dev/null
+From 7e956304eb8a285304a78582e4537e72c6365f20 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Fri, 23 Sep 2016 15:13:53 +0100
+Subject: MIPS: Fix pre-r6 emulation FPU initialisation
+
+From: Paul Burton <paul.burton@imgtec.com>
+
+commit 7e956304eb8a285304a78582e4537e72c6365f20 upstream.
+
+In the mipsr2_decoder() function, used to emulate pre-MIPSr6
+instructions that were removed in MIPSr6, the init_fpu() function is
+called if a removed pre-MIPSr6 floating point instruction is the first
+floating point instruction used by the task. However, init_fpu()
+performs various actions that rely upon not being migrated. For example,
+in the most basic case, it sets the coprocessor 0 Status.CU1 bit to
+enable the FPU & then loads FP register context into the FPU registers.
+If the task were to migrate during this time, it may end up attempting
+to load FP register context on a different CPU where it hasn't set the
+CU1 bit, leading to errors such as:
+
+ do_cpu invoked from kernel context![#2]:
+ CPU: 2 PID: 7338 Comm: fp-prctl Tainted: G D 4.7.0-00424-g49b0c82 #2
+ task: 838e4000 ti: 88d38000 task.ti: 88d38000
+ $ 0 : 00000000 00000001 ffffffff 88d3fef8
+ $ 4 : 838e4000 88d38004 00000000 00000001
+ $ 8 : 3400fc01 801f8020 808e9100 24000000
+ $12 : dbffffff 807b69d8 807b0000 00000000
+ $16 : 00000000 80786150 00400fc4 809c0398
+ $20 : 809c0338 0040273c 88d3ff28 808e9d30
+ $24 : 808e9d30 00400fb4
+ $28 : 88d38000 88d3fe88 00000000 8011a2ac
+ Hi : 0040273c
+ Lo : 88d3ff28
+ epc : 80114178 _restore_fp+0x10/0xa0
+ ra : 8011a2ac mipsr2_decoder+0xd5c/0x1660
+ Status: 1400fc03 KERNEL EXL IE
+ Cause : 1080002c (ExcCode 0b)
+ PrId : 0001a920 (MIPS I6400)
+ Modules linked in:
+ Process fp-prctl (pid: 7338, threadinfo=88d38000, task=838e4000, tls=766527d0)
+ Stack : 00000000 00000000 00000000 88d3fe98 00000000 00000000 809c0398 809c0338
+ 808e9100 00000000 88d3ff28 00400fc4 00400fc4 0040273c 7fb69e18 004a0000
+ 004a0000 004a0000 7664add0 8010de18 00000000 00000000 88d3fef8 88d3ff28
+ 808e9100 00000000 766527d0 8010e534 000c0000 85755000 8181d580 00000000
+ 00000000 00000000 004a0000 00000000 766527d0 7fb69e18 004a0000 80105c20
+ ...
+ Call Trace:
+ [<80114178>] _restore_fp+0x10/0xa0
+ [<8011a2ac>] mipsr2_decoder+0xd5c/0x1660
+ [<8010de18>] do_ri+0x90/0x6b8
+ [<80105c20>] ret_from_exception+0x0/0x10
+
+Fix this by disabling preemption around the call to init_fpu(), ensuring
+that it starts & completes on one CPU.
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Fixes: b0a668fb2038 ("MIPS: kernel: mips-r2-to-r6-emul: Add R2 emulator for MIPS R6")
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14305/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/mips-r2-to-r6-emul.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -1164,7 +1164,9 @@ fpu_emul:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ if (!used_math()) { /* First time FPU user. */
++ preempt_disable();
+ err = init_fpu();
++ preempt_enable();
+ set_used_math();
+ }
+ lose_fpu(1); /* Save FPU state for the emulator. */
--- /dev/null
+From 951c39cd3bc0aedf67fbd8fb4b9380287e6205d1 Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Mon, 5 Sep 2016 15:43:40 +0100
+Subject: MIPS: paravirt: Fix undefined reference to smp_bootstrap
+
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+
+commit 951c39cd3bc0aedf67fbd8fb4b9380287e6205d1 upstream.
+
+If the paravirt machine is compiled without CONFIG_SMP, the following
+linker error occurs:
+
+arch/mips/kernel/head.o: In function `kernel_entry':
+(.ref.text+0x10): undefined reference to `smp_bootstrap'
+
+due to the kernel entry macro always including SMP startup code.
+Wrap this code in CONFIG_SMP to fix the error.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14212/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/mach-paravirt/kernel-entry-init.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
++++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+@@ -11,11 +11,13 @@
+ #define CP0_EBASE $15, 1
+
+ .macro kernel_entry_setup
++#ifdef CONFIG_SMP
+ mfc0 t0, CP0_EBASE
+ andi t0, t0, 0x3ff # CPUNum
+ beqz t0, 1f
+ # CPUs other than zero goto smp_bootstrap
+ j smp_bootstrap
++#endif /* CONFIG_SMP */
+
+ 1:
+ .endm
--- /dev/null
+From b03c1e3b8eed9026733c473071d1f528358a0e50 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Mon, 12 Sep 2016 10:58:06 +0100
+Subject: MIPS: Remove compact branch policy Kconfig entries
+
+From: Paul Burton <paul.burton@imgtec.com>
+
+commit b03c1e3b8eed9026733c473071d1f528358a0e50 upstream.
+
+Commit c1a0e9bc885d ("MIPS: Allow compact branch policy to be changed")
+added Kconfig entries allowing for the compact branch policy used by the
+compiler for MIPSr6 kernels to be specified. This can be useful for
+debugging, particularly in systems where compact branches have recently
+been introduced.
+
+Unfortunately mainline gcc 5.x supports MIPSr6 but not the
+-mcompact-branches compiler flag, leading to MIPSr6 kernels failing to
+build with gcc 5.x with errors such as:
+
+ mipsel-linux-gnu-gcc: error: unrecognized command line option '-mcompact-branches=optimal'
+ make[2]: *** [kernel/bounds.s] Error 1
+
+Fixing this by hiding the Kconfig entry behind another seems to be more
+hassle than it's worth, as MIPSr6 & compact branches have been around
+for a while now and if policy does need to be set for debug it can be
+done easily enough with KCFLAGS. Therefore remove the compact branch
+policy Kconfig entries & their handling in the Makefile.
+
+This reverts commit c1a0e9bc885d ("MIPS: Allow compact branch policy to
+be changed").
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Reported-by: kbuild test robot <fengguang.wu@intel.com>
+Fixes: c1a0e9bc885d ("MIPS: Allow compact branch policy to be changed")
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14241/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig.debug | 36 ------------------------------------
+ arch/mips/Makefile | 4 ----
+ 2 files changed, 40 deletions(-)
+
+--- a/arch/mips/Kconfig.debug
++++ b/arch/mips/Kconfig.debug
+@@ -113,42 +113,6 @@ config SPINLOCK_TEST
+ help
+ Add several files to the debugfs to test spinlock speed.
+
+-if CPU_MIPSR6
+-
+-choice
+- prompt "Compact branch policy"
+- default MIPS_COMPACT_BRANCHES_OPTIMAL
+-
+-config MIPS_COMPACT_BRANCHES_NEVER
+- bool "Never (force delay slot branches)"
+- help
+- Pass the -mcompact-branches=never flag to the compiler in order to
+- force it to always emit branches with delay slots, and make no use
+- of the compact branch instructions introduced by MIPSr6. This is
+- useful if you suspect there may be an issue with compact branches in
+- either the compiler or the CPU.
+-
+-config MIPS_COMPACT_BRANCHES_OPTIMAL
+- bool "Optimal (use where beneficial)"
+- help
+- Pass the -mcompact-branches=optimal flag to the compiler in order for
+- it to make use of compact branch instructions where it deems them
+- beneficial, and use branches with delay slots elsewhere. This is the
+- default compiler behaviour, and should be used unless you have a
+- reason to choose otherwise.
+-
+-config MIPS_COMPACT_BRANCHES_ALWAYS
+- bool "Always (force compact branches)"
+- help
+- Pass the -mcompact-branches=always flag to the compiler in order to
+- force it to always emit compact branches, making no use of branch
+- instructions with delay slots. This can result in more compact code
+- which may be beneficial in some scenarios.
+-
+-endchoice
+-
+-endif # CPU_MIPSR6
+-
+ config SCACHE_DEBUGFS
+ bool "L2 cache debugfs entries"
+ depends on DEBUG_FS
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,
+ cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+ endif
+
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
+-
+ #
+ # Firmware support
+ #
--- /dev/null
+From 8f46cca1e6c06a058374816887059bcc017b382f Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Thu, 22 Sep 2016 17:15:47 +0100
+Subject: MIPS: SMP: Fix possibility of deadlock when bringing CPUs online
+
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+
+commit 8f46cca1e6c06a058374816887059bcc017b382f upstream.
+
+This patch fixes the possibility of a deadlock when bringing up
+secondary CPUs.
+The deadlock occurs because set_cpu_online() is called before
+synchronise_count_slave(). This can cause a deadlock if the boot CPU,
+having scheduled another thread, attempts to send an IPI to the
+secondary CPU, which it sees has been marked online. The secondary is
+blocked in synchronise_count_slave() waiting for the boot CPU to enter
+synchronise_count_master(), but the boot CPU is blocked in
+smp_call_function_many() waiting for the secondary to respond to its
+IPI request.
+
+Fix this by marking the CPU online in cpu_callin_map and synchronising
+counters before declaring the CPU online and calculating the maps for
+IPIs.
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Reported-by: Justin Chen <justinpopo6@gmail.com>
+Tested-by: Justin Chen <justinpopo6@gmail.com>
+Cc: Florian Fainelli <f.fainelli@gmail.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14302/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/smp.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
++ cpumask_set_cpu(cpu, &cpu_callin_map);
++ synchronise_count_slave(cpu);
++
+ set_cpu_online(cpu, true);
+
+ set_cpu_sibling_map(cpu);
+@@ -181,10 +184,6 @@ asmlinkage void start_secondary(void)
+
+ calculate_cpu_foreign_map();
+
+- cpumask_set_cpu(cpu, &cpu_callin_map);
+-
+- synchronise_count_slave(cpu);
+-
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
--- /dev/null
+From 554af0c396380baf416f54c439b99b495180b2f4 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Wed, 7 Sep 2016 13:37:01 +0100
+Subject: MIPS: vDSO: Fix Malta EVA mapping to vDSO page structs
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 554af0c396380baf416f54c439b99b495180b2f4 upstream.
+
+The page structures associated with the vDSO pages in the kernel image
+are calculated using virt_to_page(), which uses __pa() under the hood to
+find the pfn associated with the virtual address. The vDSO data pointers
+however point to kernel symbols, so __pa_symbol() should really be used
+instead.
+
+Since there is no equivalent to virt_to_page() which uses __pa_symbol(),
+fix init_vdso_image() to work directly with pfns, calculated with
+__phys_to_pfn(__pa_symbol(...)).
+
+This issue broke the Malta Enhanced Virtual Addressing (EVA)
+configuration which has a non-default implementation of __pa_symbol().
+This is because it uses a physical alias so that the kernel executes
+from KSeg0 (VA 0x80000000 -> PA 0x00000000), while RAM is provided to
+the kernel in the KUSeg range (VA 0x00000000 -> PA 0x80000000) which
+uses the same underlying RAM.
+
+Since there are no page structures associated with the low physical
+address region, some arbitrary kernel memory would be interpreted as a
+page structure for the vDSO pages and badness ensues.
+
+Fixes: ebb5e78cc634 ("MIPS: Initial implementation of a VDSO")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/14229/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/vdso.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vv
+ static void __init init_vdso_image(struct mips_vdso_image *image)
+ {
+ unsigned long num_pages, i;
++ unsigned long data_pfn;
+
+ BUG_ON(!PAGE_ALIGNED(image->data));
+ BUG_ON(!PAGE_ALIGNED(image->size));
+
+ num_pages = image->size / PAGE_SIZE;
+
+- for (i = 0; i < num_pages; i++) {
+- image->mapping.pages[i] =
+- virt_to_page(image->data + (i * PAGE_SIZE));
+- }
++ data_pfn = __phys_to_pfn(__pa_symbol(image->data));
++ for (i = 0; i < num_pages; i++)
++ image->mapping.pages[i] = pfn_to_page(data_pfn + i);
+ }
+
+ static int __init init_vdso(void)
--- /dev/null
+From b385d21f27d86426472f6ae92a231095f7de2a8d Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Fri, 23 Sep 2016 20:27:04 -0700
+Subject: mm: delete unnecessary and unsafe init_tlb_ubc()
+
+From: Hugh Dickins <hughd@google.com>
+
+commit b385d21f27d86426472f6ae92a231095f7de2a8d upstream.
+
+init_tlb_ubc() looked unnecessary to me: tlb_ubc is statically
+initialized with zeroes in the init_task, and copied from parent to
+child while it is quiescent in arch_dup_task_struct(); so I went to
+delete it.
+
+But inserted temporary debug WARN_ONs in place of init_tlb_ubc() to
+check that it was always empty at that point, and found them firing:
+because memcg reclaim can recurse into global reclaim (when allocating
+biosets for swapout in my case), and arrive back at the init_tlb_ubc()
+in shrink_node_memcg().
+
+Resetting tlb_ubc.flush_required at that point is wrong: if the upper
+level needs a deferred TLB flush, but the lower level turns out not to,
+we miss a TLB flush. But fortunately, that's the only part of the
+protocol that does not nest: with the initialization removed, cpumask
+collects bits from upper and lower levels, and flushes TLB when needed.
+
+Fixes: 72b252aed506 ("mm: send one IPI per CPU to TLB flush all entries after unmapping pages")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 19 -------------------
+ 1 file changed, 19 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2159,23 +2159,6 @@ out:
+ }
+ }
+
+-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+-static void init_tlb_ubc(void)
+-{
+- /*
+- * This deliberately does not clear the cpumask as it's expensive
+- * and unnecessary. If there happens to be data in there then the
+- * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
+- * then will be cleared.
+- */
+- current->tlb_ubc.flush_required = false;
+-}
+-#else
+-static inline void init_tlb_ubc(void)
+-{
+-}
+-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+-
+ /*
+ * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
+ */
+@@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec
+ scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
+ sc->priority == DEF_PRIORITY);
+
+- init_tlb_ubc();
+-
+ blk_start_plug(&plug);
+ while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+ nr[LRU_INACTIVE_FILE]) {
--- /dev/null
+From dc01a28d80a42cef08c94dfc595565aaebe46d15 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 15 Jul 2016 14:06:30 +0300
+Subject: mtd: maps: sa1100-flash: potential NULL dereference
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit dc01a28d80a42cef08c94dfc595565aaebe46d15 upstream.
+
+We check for NULL but then dereference "info->mtd" on the next line.
+
+Fixes: 72169755cf36 ('mtd: maps: sa1100-flash: show parent device in sysfs')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/maps/sa1100-flash.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/maps/sa1100-flash.c
++++ b/drivers/mtd/maps/sa1100-flash.c
+@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(
+
+ info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ plat->name);
+- if (info->mtd == NULL)
++ if (info->mtd == NULL) {
+ ret = -ENXIO;
++ goto err;
++ }
+ }
+ info->mtd->dev.parent = &pdev->dev;
+
--- /dev/null
+From 79ad07d45743721010e766e65dc004ad249bd429 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 14 Jul 2016 13:44:56 +0300
+Subject: mtd: pmcmsp-flash: Allocating too much in init_msp_flash()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 79ad07d45743721010e766e65dc004ad249bd429 upstream.
+
+There is a cut and paste issue here. The bug is that we are allocating
+more memory than necessary for msp_maps. We should be allocating enough
+space for a map_info struct (144 bytes) but we instead allocate enough
+for an mtd_info struct (1840 bytes). It's a small waste.
+
+The other part of this is not harmful, but when we allocated msp_flash
+we allocated enough space for a map_info pointer instead of an mtd_info
+pointer. Since pointers are the same size it works out fine.
+
+Anyway, I decided to clean up all three allocations a bit to make them
+a bit more consistent and clear.
+
+Fixes: 68aa0fa87f6d ('[MTD] PMC MSP71xx flash/rootfs mappings')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/maps/pmcmsp-flash.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
+
+ printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
+
+- msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
++ msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
+ if (!msp_flash)
+ return -ENOMEM;
+
+- msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
++ msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
+ if (!msp_parts)
+ goto free_msp_flash;
+
+- msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
++ msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
+ if (!msp_maps)
+ goto free_msp_parts;
+
--- /dev/null
+From ad5987b47e96a0fb6d13fea250e936aed000093c Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 13 Sep 2016 15:53:55 +0200
+Subject: nl80211: validate number of probe response CSA counters
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit ad5987b47e96a0fb6d13fea250e936aed000093c upstream.
+
+Due to an apparent copy/paste bug, the number of counters for the
+beacon configuration was checked twice, instead of checking the
+number of probe response counters. Fix this to check the number of
+probe response counters before parsing those.
+
+Fixes: 9a774c78e211 ("cfg80211: Support multiple CSA counters")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/nl80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct
+
+ params.n_counter_offsets_presp = len / sizeof(u16);
+ if (rdev->wiphy.max_num_csa_counters &&
+- (params.n_counter_offsets_beacon >
++ (params.n_counter_offsets_presp >
+ rdev->wiphy.max_num_csa_counters))
+ return -EINVAL;
+
--- /dev/null
+From bae170efd6c42bf116f513a1dd07639d68fa71b9 Mon Sep 17 00:00:00 2001
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Date: Fri, 12 Aug 2016 20:49:18 +0530
+Subject: power: reset: hisi-reboot: Unmap region obtained by of_iomap
+
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+
+commit bae170efd6c42bf116f513a1dd07639d68fa71b9 upstream.
+
+Free the memory mapping if probe is not successful.
+
+Fixes: 4a9b37371822 ("power: reset: move hisilicon reboot code")
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Signed-off-by: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/reset/hisi-reboot.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/power/reset/hisi-reboot.c
++++ b/drivers/power/reset/hisi-reboot.c
+@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct plat
+
+ if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
+ pr_err("failed to find reboot-offset property\n");
++ iounmap(base);
+ return -EINVAL;
+ }
+
+ err = register_restart_handler(&hisi_restart_nb);
+- if (err)
++ if (err) {
+ dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
+ err);
++ iounmap(base);
++ }
+
+ return err;
+ }
autofs-races.patch
autofs-use-dentry-flags-to-block-walks-during-expire.patch
xfs-prevent-dropping-ioend-completions-during-buftarg-wait.patch
+fsnotify-add-a-way-to-stop-queueing-events-on-group-shutdown.patch
+fanotify-fix-list-corruption-in-fanotify_get_response.patch
+fix-fault_in_multipages_...-on-architectures-with-no-op-access_ok.patch
+mtd-maps-sa1100-flash-potential-null-dereference.patch
+mtd-pmcmsp-flash-allocating-too-much-in-init_msp_flash.patch
+power-reset-hisi-reboot-unmap-region-obtained-by-of_iomap.patch
+fix-memory-leaks-in-tracing_buffers_splice_read.patch
+tracing-move-mutex-to-protect-against-resetting-of-seq-data.patch
+mm-delete-unnecessary-and-unsafe-init_tlb_ubc.patch
+can-flexcan-fix-resume-function.patch
+nl80211-validate-number-of-probe-response-csa-counters.patch
+btrfs-ensure-that-file-descriptor-used-with-subvol-ioctls-is-a-dir.patch
+i2c-eg20t-fix-race-between-i2c-init-and-interrupt-enable.patch
+i2c-qup-skip-qup_i2c_suspend-if-the-device-is-already-runtime-suspended.patch
+mips-fix-pre-r6-emulation-fpu-initialisation.patch
+mips-smp-fix-possibility-of-deadlock-when-bringing-cpus-online.patch
+mips-vdso-fix-malta-eva-mapping-to-vdso-page-structs.patch
+mips-remove-compact-branch-policy-kconfig-entries.patch
+mips-avoid-a-bug-warning-during-prctl-pr_set_fp_mode.patch
+mips-add-a-missing-.set-pop-in-an-early-commit.patch
+mips-paravirt-fix-undefined-reference-to-smp_bootstrap.patch
--- /dev/null
+From 1245800c0f96eb6ebb368593e251d66c01e61022 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Fri, 23 Sep 2016 22:57:13 -0400
+Subject: tracing: Move mutex to protect against resetting of seq data
+
+From: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
+
+commit 1245800c0f96eb6ebb368593e251d66c01e61022 upstream.
+
+The iter->seq can be reset outside the protection of the mutex. So can
+reading of user data. Move the mutex up to the beginning of the function.
+
+Fixes: d7350c3f45694 ("tracing/core: make the read callbacks reentrants")
+Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4727,19 +4727,20 @@ tracing_read_pipe(struct file *filp, cha
+ struct trace_iterator *iter = filp->private_data;
+ ssize_t sret;
+
+- /* return any leftover data */
+- sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+- if (sret != -EBUSY)
+- return sret;
+-
+- trace_seq_init(&iter->seq);
+-
+ /*
+ * Avoid more than one consumer on a single file descriptor
+ * This is just a matter of traces coherency, the ring buffer itself
+ * is protected.
+ */
+ mutex_lock(&iter->mutex);
++
++ /* return any leftover data */
++ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
++ if (sret != -EBUSY)
++ goto out;
++
++ trace_seq_init(&iter->seq);
++
+ if (iter->trace->read) {
+ sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ if (sret)