--- /dev/null
+From f39f775218a7520e3700de2003c84a042c3b5972 Mon Sep 17 00:00:00 2001
+From: Maor Gottlieb <maorg@mellanox.com>
+Date: Thu, 19 Jan 2017 15:25:58 +0200
+Subject: IB/rxe: Fix rxe dev insertion to rxe_dev_list
+
+From: Maor Gottlieb <maorg@mellanox.com>
+
+commit f39f775218a7520e3700de2003c84a042c3b5972 upstream.
+
+The first argument of list_add_tail() is the new item and the second
+is the head of the list. Fix the code to pass the arguments in the right
+order; otherwise not all rxe devices will be removed during teardown.
+
+
+Fixes: 8700e3e7c4857 ('Soft RoCE driver')
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Reviewed-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rxe/rxe_net.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -554,7 +554,7 @@ struct rxe_dev *rxe_net_add(struct net_d
+ }
+
+ spin_lock_bh(&dev_list_lock);
+- list_add_tail(&rxe_dev_list, &rxe->list);
++ list_add_tail(&rxe->list, &rxe_dev_list);
+ spin_unlock_bh(&dev_list_lock);
+ return rxe;
+ }
--- /dev/null
+From 2d4b21e0a2913612274a69a3ba1bfee4cffc6e77 Mon Sep 17 00:00:00 2001
+From: Yonatan Cohen <yonatanc@mellanox.com>
+Date: Thu, 19 Jan 2017 15:25:59 +0200
+Subject: IB/rxe: Prevent from completer to operate on non valid QP
+
+From: Yonatan Cohen <yonatanc@mellanox.com>
+
+commit 2d4b21e0a2913612274a69a3ba1bfee4cffc6e77 upstream.
+
+On a UD QP, the completer tasklet is scheduled for each packet sent.
+
+If this is followed by a destroy_qp(), a kernel panic will
+occur as the completer tries to operate on a destroyed QP.
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
+Reviewed-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
+ del_timer_sync(&qp->rnr_nak_timer);
+
+ rxe_cleanup_task(&qp->req.task);
+- if (qp_type(qp) == IB_QPT_RC)
+- rxe_cleanup_task(&qp->comp.task);
++ rxe_cleanup_task(&qp->comp.task);
+
+ /* flush out any receive wr's or pending requests */
+ __rxe_do_task(&qp->req.task);
--- /dev/null
+From 828f6fa65ce7e80f77f5ab12942e44eb3d9d174e Mon Sep 17 00:00:00 2001
+From: Kenneth Lee <liguozhu@hisilicon.com>
+Date: Thu, 5 Jan 2017 15:00:05 +0800
+Subject: IB/umem: Release pid in error and ODP flow
+
+From: Kenneth Lee <liguozhu@hisilicon.com>
+
+commit 828f6fa65ce7e80f77f5ab12942e44eb3d9d174e upstream.
+
+1. Release pid before entering the ODP flow
+2. Release pid when failing to allocate memory
+
+Fixes: 87773dd56d54 ("IB: ib_umem_release() should decrement mm->pinned_vm from ib_umem_get")
+Fixes: 8ada2c1c0c1d ("IB/core: Add support for on demand paging regions")
+Signed-off-by: Kenneth Lee <liguozhu@hisilicon.com>
+Reviewed-by: Haggai Eran <haggaie@mellanox.com>
+Reviewed-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/umem.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+
+ if (access & IB_ACCESS_ON_DEMAND) {
++ put_pid(umem->pid);
+ ret = ib_umem_odp_get(context, umem);
+ if (ret) {
+ kfree(umem);
+@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list) {
++ put_pid(umem->pid);
+ kfree(umem);
+ return ERR_PTR(-ENOMEM);
+ }
--- /dev/null
+From 8a1f780e7f28c7c1d640118242cf68d528c456cd Mon Sep 17 00:00:00 2001
+From: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
+Date: Tue, 24 Jan 2017 15:17:45 -0800
+Subject: memory_hotplug: make zone_can_shift() return a boolean value
+
+From: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
+
+commit 8a1f780e7f28c7c1d640118242cf68d528c456cd upstream.
+
+online_{kernel|movable} is used to change the memory zone to
+ZONE_{NORMAL|MOVABLE} and online the memory.
+
+To check whether the memory zone can be changed, zone_can_shift() is used.
+Currently the function returns a negative integer value, a positive
+integer value, or 0. When the function returns a negative or positive
+value, it means that the memory zone can be changed to ZONE_{NORMAL|MOVABLE}.
+
+But when the function returns 0, there are two meanings.
+
+One meaning is that the memory zone does not need to be changed.
+For example, when memory is in ZONE_NORMAL and onlined by online_kernel,
+the memory zone does not need to be changed.
+
+Another meaning is that the memory zone cannot be changed. When memory
+is in ZONE_NORMAL and onlined by online_movable, the memory zone may
+not be changed to ZONE_MOVABLE due to memory online limitations (see
+Documentation/memory-hotplug.txt). In this case, the memory must not be
+onlined.
+
+The patch changes the return type of zone_can_shift() so that the memory
+online operation fails when the memory zone cannot be changed, as follows:
+
+Before applying patch:
+ # grep -A 35 "Node 2" /proc/zoneinfo
+ Node 2, zone Normal
+ <snip>
+ node_scanned 0
+ spanned 8388608
+ present 7864320
+ managed 7864320
+ # echo online_movable > memory4097/state
+ # grep -A 35 "Node 2" /proc/zoneinfo
+ Node 2, zone Normal
+ <snip>
+ node_scanned 0
+ spanned 8388608
+ present 8388608
+ managed 8388608
+
+ The online_movable operation succeeded, but the memory was onlined as
+ ZONE_NORMAL, not ZONE_MOVABLE.
+
+After applying patch:
+ # grep -A 35 "Node 2" /proc/zoneinfo
+ Node 2, zone Normal
+ <snip>
+ node_scanned 0
+ spanned 8388608
+ present 7864320
+ managed 7864320
+ # echo online_movable > memory4097/state
+ bash: echo: write error: Invalid argument
+ # grep -A 35 "Node 2" /proc/zoneinfo
+ Node 2, zone Normal
+ <snip>
+ node_scanned 0
+ spanned 8388608
+ present 7864320
+ managed 7864320
+
+ The online_movable operation failed because the memory zone could not
+ be changed from ZONE_NORMAL to ZONE_MOVABLE.
+
+Fixes: df429ac03936 ("memory-hotplug: more general validation of zone during online")
+Link: http://lkml.kernel.org/r/2f9c3837-33d7-b6e5-59c0-6ca4372b2d84@gmail.com
+Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Reviewed-by: Reza Arbab <arbab@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/memory.c | 4 ++--
+ include/linux/memory_hotplug.h | 4 ++--
+ mm/memory_hotplug.c | 28 +++++++++++++++++-----------
+ 3 files changed, 21 insertions(+), 15 deletions(-)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -410,14 +410,14 @@ static ssize_t show_valid_zones(struct d
+ sprintf(buf, "%s", zone->name);
+
+ /* MMOP_ONLINE_KERNEL */
+- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
++ zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+ }
+
+ /* MMOP_ONLINE_MOVABLE */
+- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
++ zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(st
+ unsigned long map_offset);
+ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
+-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+- enum zone_type target);
++extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
++ enum zone_type target, int *zone_shift);
+
+ #endif /* __LINUX_MEMORY_HOTPLUG_H */
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1033,36 +1033,39 @@ static void node_states_set_node(int nod
+ node_set_state(node, N_MEMORY);
+ }
+
+-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+- enum zone_type target)
++bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
++ enum zone_type target, int *zone_shift)
+ {
+ struct zone *zone = page_zone(pfn_to_page(pfn));
+ enum zone_type idx = zone_idx(zone);
+ int i;
+
++ *zone_shift = 0;
++
+ if (idx < target) {
+ /* pages must be at end of current zone */
+ if (pfn + nr_pages != zone_end_pfn(zone))
+- return 0;
++ return false;
+
+ /* no zones in use between current zone and target */
+ for (i = idx + 1; i < target; i++)
+ if (zone_is_initialized(zone - idx + i))
+- return 0;
++ return false;
+ }
+
+ if (target < idx) {
+ /* pages must be at beginning of current zone */
+ if (pfn != zone->zone_start_pfn)
+- return 0;
++ return false;
+
+ /* no zones in use between current zone and target */
+ for (i = target + 1; i < idx; i++)
+ if (zone_is_initialized(zone - idx + i))
+- return 0;
++ return false;
+ }
+
+- return target - idx;
++ *zone_shift = target - idx;
++ return true;
+ }
+
+ /* Must be protected by mem_hotplug_begin() */
+@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn
+ !can_online_high_movable(zone))
+ return -EINVAL;
+
+- if (online_type == MMOP_ONLINE_KERNEL)
+- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
+- else if (online_type == MMOP_ONLINE_MOVABLE)
+- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
++ if (online_type == MMOP_ONLINE_KERNEL) {
++ if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
++ return -EINVAL;
++ } else if (online_type == MMOP_ONLINE_MOVABLE) {
++ if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
++ return -EINVAL;
++ }
+
+ zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
+ if (!zone)
--- /dev/null
+From 3674534b775354516e5c148ea48f51d4d1909a78 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Tue, 24 Jan 2017 15:18:10 -0800
+Subject: mm, memcg: do not retry precharge charges
+
+From: David Rientjes <rientjes@google.com>
+
+commit 3674534b775354516e5c148ea48f51d4d1909a78 upstream.
+
+When memory.move_charge_at_immigrate is enabled and precharges are
+depleted during move, mem_cgroup_move_charge_pte_range() will attempt to
+increase the size of the precharge.
+
+Prevent precharges from ever looping by setting __GFP_NORETRY. This was
+probably the intention of the GFP_KERNEL & ~__GFP_NORETRY, which is
+pointless as written.
+
+Fixes: 0029e19ebf84 ("mm: memcontrol: remove explicit OOM parameter in charge path")
+Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1701130208510.69402@chino.kir.corp.google.com
+Signed-off-by: David Rientjes <rientjes@google.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4360,9 +4360,9 @@ static int mem_cgroup_do_precharge(unsig
+ return ret;
+ }
+
+- /* Try charges one by one with reclaim */
++ /* Try charges one by one with reclaim, but do not retry */
+ while (count--) {
+- ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
++ ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
+ if (ret)
+ return ret;
+ mc.precharge++;
--- /dev/null
+From 321027c1fe77f892f4ea07846aeae08cefbbb290 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 11 Jan 2017 21:09:50 +0100
+Subject: perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 321027c1fe77f892f4ea07846aeae08cefbbb290 upstream.
+
+Di Shen reported a race between two concurrent sys_perf_event_open()
+calls where both try and move the same pre-existing software group
+into a hardware context.
+
+The problem is exactly that described in commit:
+
+ f63a8daa5812 ("perf: Fix event->ctx locking")
+
+... where, while we wait for a ctx->mutex acquisition, the event->ctx
+relation can have changed under us.
+
+That very same commit failed to recognise sys_perf_event_context() as an
+external access vector to the events and thereby didn't apply the
+established locking rules correctly.
+
+So while one sys_perf_event_open() call is stuck waiting on
+mutex_lock_double(), the other (which owns said locks) moves the group
+about. So by the time the former sys_perf_event_open() acquires the
+locks, the context we've acquired is stale (and possibly dead).
+
+Apply the established locking rules as per perf_event_ctx_lock_nested()
+to the mutex_lock_double() for the 'move_group' case. This obviously means
+we need to validate state after we acquire the locks.
+
+Reported-by: Di Shen (Keen Lab)
+Tested-by: John Dias <joaodias@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Min Chong <mchong@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
+Link: http://lkml.kernel.org/r/20170106131444.GZ3174@twins.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 54 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9503,6 +9503,37 @@ static int perf_event_set_clock(struct p
+ return 0;
+ }
+
++/*
++ * Variation on perf_event_ctx_lock_nested(), except we take two context
++ * mutexes.
++ */
++static struct perf_event_context *
++__perf_event_ctx_lock_double(struct perf_event *group_leader,
++ struct perf_event_context *ctx)
++{
++ struct perf_event_context *gctx;
++
++again:
++ rcu_read_lock();
++ gctx = READ_ONCE(group_leader->ctx);
++ if (!atomic_inc_not_zero(&gctx->refcount)) {
++ rcu_read_unlock();
++ goto again;
++ }
++ rcu_read_unlock();
++
++ mutex_lock_double(&gctx->mutex, &ctx->mutex);
++
++ if (group_leader->ctx != gctx) {
++ mutex_unlock(&ctx->mutex);
++ mutex_unlock(&gctx->mutex);
++ put_ctx(gctx);
++ goto again;
++ }
++
++ return gctx;
++}
++
+ /**
+ * sys_perf_event_open - open a performance event, associate it to a task/cpu
+ *
+@@ -9746,12 +9777,31 @@ SYSCALL_DEFINE5(perf_event_open,
+ }
+
+ if (move_group) {
+- gctx = group_leader->ctx;
+- mutex_lock_double(&gctx->mutex, &ctx->mutex);
++ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
++
+ if (gctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_locked;
+ }
++
++ /*
++ * Check if we raced against another sys_perf_event_open() call
++ * moving the software group underneath us.
++ */
++ if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
++ /*
++ * If someone moved the group out from under us, check
++ * if this new event wound up on the same ctx, if so
++ * its the regular !move_group case, otherwise fail.
++ */
++ if (gctx != ctx) {
++ err = -EINVAL;
++ goto err_locked;
++ } else {
++ perf_event_ctx_unlock(group_leader, gctx);
++ move_group = 0;
++ }
++ }
+ } else {
+ mutex_lock(&ctx->mutex);
+ }
+@@ -9853,7 +9903,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ perf_unpin_context(ctx);
+
+ if (move_group)
+- mutex_unlock(&gctx->mutex);
++ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+
+ if (task) {
+@@ -9879,7 +9929,7 @@ SYSCALL_DEFINE5(perf_event_open,
+
+ err_locked:
+ if (move_group)
+- mutex_unlock(&gctx->mutex);
++ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+ /* err_file: */
+ fput(event_file);
--- /dev/null
+From 04ff5a095d662e0879f0eb04b9247e092210aeff Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Tue, 10 Jan 2017 16:38:52 +0200
+Subject: pinctrl: baytrail: Rectify debounce support
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 04ff5a095d662e0879f0eb04b9247e092210aeff upstream.
+
+The commit 658b476c742f ("pinctrl: baytrail: Add debounce configuration")
+implements debounce for the Baytrail pin controller, but it seems it was
+not tested properly.
+
+The register which holds the debounce value is separate from the configuration
+one. Writing wrong values to the latter guarantees wrong behaviour of the
+driver and might even break something physically.
+
+Besides the above, the case of disabling debounce is missing; that is actually
+done through a bit in the configuration register.
+
+Rectify the implementation by using the proper register for the debounce value.
+
+Fixes: 658b476c742f ("pinctrl: baytrail: Add debounce configuration")
+Cc: Cristina Ciocan <cristina.ciocan@intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c | 28 +++++++++++++++++-----------
+ 1 file changed, 17 insertions(+), 11 deletions(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pin
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
++ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
+ unsigned long flags;
+ u32 conf, pull, val, debounce;
+ u16 arg = 0;
+@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pin
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+- debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
++ debounce = readl(db_reg);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
+@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pin
+ unsigned int param, arg;
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
++ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
+ unsigned long flags;
+ u32 conf, val, debounce;
+ int i, ret = 0;
+@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pin
+
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+- debounce = readl(byt_gpio_reg(vg, offset,
+- BYT_DEBOUNCE_REG));
+- conf &= ~BYT_DEBOUNCE_PULSE_MASK;
++ debounce = readl(db_reg);
++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+
+ switch (arg) {
++ case 0:
++ conf &= BYT_DEBOUNCE_EN;
++ break;
+ case 375:
+- conf |= BYT_DEBOUNCE_PULSE_375US;
++ debounce |= BYT_DEBOUNCE_PULSE_375US;
+ break;
+ case 750:
+- conf |= BYT_DEBOUNCE_PULSE_750US;
++ debounce |= BYT_DEBOUNCE_PULSE_750US;
+ break;
+ case 1500:
+- conf |= BYT_DEBOUNCE_PULSE_1500US;
++ debounce |= BYT_DEBOUNCE_PULSE_1500US;
+ break;
+ case 3000:
+- conf |= BYT_DEBOUNCE_PULSE_3MS;
++ debounce |= BYT_DEBOUNCE_PULSE_3MS;
+ break;
+ case 6000:
+- conf |= BYT_DEBOUNCE_PULSE_6MS;
++ debounce |= BYT_DEBOUNCE_PULSE_6MS;
+ break;
+ case 12000:
+- conf |= BYT_DEBOUNCE_PULSE_12MS;
++ debounce |= BYT_DEBOUNCE_PULSE_12MS;
+ break;
+ case 24000:
+- conf |= BYT_DEBOUNCE_PULSE_24MS;
++ debounce |= BYT_DEBOUNCE_PULSE_24MS;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
++ if (!ret)
++ writel(debounce, db_reg);
+ break;
+ default:
+ ret = -ENOTSUPP;
--- /dev/null
+From ecc8995363ee6231b32dad61c955b371b79cc4cf Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Tue, 10 Jan 2017 17:31:56 +0300
+Subject: pinctrl: broxton: Use correct PADCFGLOCK offset
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit ecc8995363ee6231b32dad61c955b371b79cc4cf upstream.
+
+The PADCFGLOCK (and PADCFGLOCK_TX) offset in Broxton actually starts at 0x060,
+not 0x090 as used in the driver. Fix it to use the correct offset.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-broxton.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-broxton.c
++++ b/drivers/pinctrl/intel/pinctrl-broxton.c
+@@ -19,7 +19,7 @@
+
+ #define BXT_PAD_OWN 0x020
+ #define BXT_HOSTSW_OWN 0x080
+-#define BXT_PADCFGLOCK 0x090
++#define BXT_PADCFGLOCK 0x060
+ #define BXT_GPI_IE 0x110
+
+ #define BXT_COMMUNITY(s, e) \
--- /dev/null
+From df1539c25cce98e2ac69881958850c6535240707 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Tue, 17 Jan 2017 19:52:54 +0900
+Subject: pinctrl: uniphier: fix Ethernet (RMII) pin-mux setting for LD20
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit df1539c25cce98e2ac69881958850c6535240707 upstream.
+
+Fix the pin-mux values for the MDC, MDIO, MDIO_INTL, PHYRSTL pins.
+
+Fixes: 1e359ab1285e ("pinctrl: uniphier: add Ethernet pin-mux settings")
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
++++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] =
+ 0, 0, 0, 0};
+ static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
+ 41, 42, 45};
+-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
++static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
+ static const unsigned i2c0_pins[] = {63, 64};
+ static const int i2c0_muxvals[] = {0, 0};
+ static const unsigned i2c1_pins[] = {65, 66};
--- /dev/null
+From 5a00b6c2438460b870a451f14593fc40d3c7edf6 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Thu, 19 Jan 2017 18:39:40 +0200
+Subject: platform/x86: intel_mid_powerbtn: Set IRQ_ONESHOT
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 5a00b6c2438460b870a451f14593fc40d3c7edf6 upstream.
+
+The commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests")
+started refusing misconfigured interrupt handlers. This makes
+intel_mid_powerbtn stop working.
+
+Add the mandatory IRQF_ONESHOT flag to the threaded IRQ request in the driver.
+
+Fixes: 1c6c69525b40 ("genirq: Reject bogus threaded irq requests")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/intel_mid_powerbtn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/intel_mid_powerbtn.c
++++ b/drivers/platform/x86/intel_mid_powerbtn.c
+@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
++ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
+ DRIVER_NAME, input);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
--- /dev/null
+From 63d762b88cb5510f2bfdb5112ced18cde867ae61 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Sat, 7 Jan 2017 09:33:34 +0300
+Subject: platform/x86: mlx-platform: free first dev on error
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 63d762b88cb5510f2bfdb5112ced18cde867ae61 upstream.
+
+There is an off-by-one error so we don't unregister priv->pdev_mux[0].
+Also it's slightly simpler as a while loop instead of a for loop.
+
+Fixes: 58cbbee2391c ("x86/platform/mellanox: Introduce support for Mellanox systems platform")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Vadim Pasternak <vadimp@mellanox.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/platform/mellanox/mlx-platform.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/platform/mellanox/mlx-platform.c
++++ b/arch/x86/platform/mellanox/mlx-platform.c
+@@ -233,7 +233,7 @@ static int __init mlxplat_init(void)
+ return 0;
+
+ fail_platform_mux_register:
+- for (i--; i > 0 ; i--)
++ while (--i >= 0)
+ platform_device_unregister(priv->pdev_mux[i]);
+ platform_device_unregister(priv->pdev_i2c);
+ fail_alloc:
--- /dev/null
+From c739c0a7c3c2472d7562b8f802cdce44d2597c8b Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 9 Dec 2016 09:41:29 -0200
+Subject: [media] s5k4ecgx: select CRC32 helper
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit c739c0a7c3c2472d7562b8f802cdce44d2597c8b upstream.
+
+A rare randconfig build failure shows up in this driver when
+the CRC32 helper is not there:
+
+drivers/media/built-in.o: In function `s5k4ecgx_s_power':
+s5k4ecgx.c:(.text+0x9eb4): undefined reference to `crc32_le'
+
+This adds the 'select' that all other users of this function have.
+
+Fixes: 8b99312b7214 ("[media] Add v4l2 subdev driver for S5K4ECGX sensor")
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/i2c/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
+ config VIDEO_S5K4ECGX
+ tristate "Samsung S5K4ECGX sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
++ select CRC32
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
+ camera sensor with an embedded SoC image signal processor.
drm-i915-don-t-init-hpd-polling-for-vlv-and-chv-from-runtime_suspend.patch
drm-i915-fix-calculation-of-rotated-x-and-y-offsets-for-planar-formats.patch
drm-i915-check-for-null-atomic-state-in-intel_crtc_disable_noatomic.patch
+ib-umem-release-pid-in-error-and-odp-flow.patch
+ib-rxe-fix-rxe-dev-insertion-to-rxe_dev_list.patch
+ib-rxe-prevent-from-completer-to-operate-on-non-valid-qp.patch
+s5k4ecgx-select-crc32-helper.patch
+pinctrl-broxton-use-correct-padcfglock-offset.patch
+pinctrl-uniphier-fix-ethernet-rmii-pin-mux-setting-for-ld20.patch
+pinctrl-baytrail-rectify-debounce-support.patch
+memory_hotplug-make-zone_can_shift-return-a-boolean-value.patch
+virtio_mmio-set-dma-masks-appropriately.patch
+platform-x86-mlx-platform-free-first-dev-on-error.patch
+platform-x86-intel_mid_powerbtn-set-irq_oneshot.patch
+mm-memcg-do-not-retry-precharge-charges.patch
+perf-core-fix-concurrent-sys_perf_event_open-vs.-move_group-race.patch
--- /dev/null
+From f7f6634d23830ff74335734fbdb28ea109c1f349 Mon Sep 17 00:00:00 2001
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Tue, 10 Jan 2017 17:51:17 +0000
+Subject: virtio_mmio: Set DMA masks appropriately
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+commit f7f6634d23830ff74335734fbdb28ea109c1f349 upstream.
+
+Once DMA API usage is enabled, it becomes apparent that virtio-mmio is
+inadvertently relying on the default 32-bit DMA mask, which leads to
+problems like rapidly exhausting SWIOTLB bounce buffers.
+
+Ensure that we set the appropriate 64-bit DMA mask whenever possible,
+with the coherent mask suitably limited for the legacy vring as per
+a0be1db4304f ("virtio_pci: Limit DMA mask to 44 bits for legacy virtio
+devices").
+
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Reported-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Fixes: b42111382f0e ("virtio_mmio: Use the DMA API if enabled")
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_mmio.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -59,6 +59,7 @@
+ #define pr_fmt(fmt) "virtio-mmio: " fmt
+
+ #include <linux/acpi.h>
++#include <linux/dma-mapping.h>
+ #include <linux/highmem.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -497,6 +498,7 @@ static int virtio_mmio_probe(struct plat
+ struct virtio_mmio_device *vm_dev;
+ struct resource *mem;
+ unsigned long magic;
++ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+@@ -545,9 +547,25 @@ static int virtio_mmio_probe(struct plat
+ }
+ vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+- if (vm_dev->version == 1)
++ if (vm_dev->version == 1) {
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
++ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++ /*
++ * In the legacy case, ensure our coherently-allocated virtio
++ * ring will be at an address expressable as a 32-bit PFN.
++ */
++ if (!rc)
++ dma_set_coherent_mask(&pdev->dev,
++ DMA_BIT_MASK(32 + PAGE_SHIFT));
++ } else {
++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ }
++ if (rc)
++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (rc)
++ dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
++
+ platform_set_drvdata(pdev, vm_dev);
+
+ return register_virtio_device(&vm_dev->vdev);