--- /dev/null
+From 60b89f1928af80b546b5c3fd8714a62f6f4b8844 Mon Sep 17 00:00:00 2001
+From: Nicolas Ferre <nicolas.ferre@microchip.com>
+Date: Tue, 14 Mar 2017 09:38:04 +0100
+Subject: ARM: at91: pm: cpu_idle: switch DDR to power-down mode
+
+From: Nicolas Ferre <nicolas.ferre@microchip.com>
+
+commit 60b89f1928af80b546b5c3fd8714a62f6f4b8844 upstream.
+
+On some DDR controllers, compatible with the sama5d3 one,
+the sequence to enter/exit/re-enter the self-refresh mode adds
+more constraints than what is currently implemented in the at91_idle
+driver. An actual access to the DDR chip is needed between exit
+and re-entry of this mode, which is somewhat difficult to implement.
+This sequence can completely hang the SoC. It is particularly
+noticeable on parts which embed an L2 cache, if the code run
+between IDLE calls fits in it...
+
+Moreover, as the intention is to enter and exit pretty rapidly
+from IDLE, the power-down mode is a good candidate.
+
+So now we use power-down instead of self-refresh. As this lets us
+simplify the code for sama5d3-compatible DDR controllers, we
+introduce a new sama5d3_ddr_standby() function.
+
+Signed-off-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Fixes: 017b5522d5e3 ("ARM: at91: Add new binding for sama5d3-ddramc")
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-at91/pm.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+ }
+
++static void sama5d3_ddr_standby(void)
++{
++ u32 lpr0;
++ u32 saved_lpr0;
++
++ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
++ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
++ lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
++
++ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
++
++ cpu_do_idle();
++
++ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
++}
++
+ /* We manage both DDRAM/SDRAM controllers, we need more than one value to
+ * remember.
+ */
+@@ -323,7 +339,7 @@ static const struct of_device_id const r
+ { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+ { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+ { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
+- { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
++ { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
+ { /*sentinel*/ }
+ };
+
--- /dev/null
+From afd0e5a876703accb95894f23317a13e2c49b523 Mon Sep 17 00:00:00 2001
+From: Neeraj Upadhyay <neeraju@codeaurora.org>
+Date: Wed, 22 Mar 2017 17:08:25 +0530
+Subject: arm64: kaslr: Fix up the kernel image alignment
+
+From: Neeraj Upadhyay <neeraju@codeaurora.org>
+
+commit afd0e5a876703accb95894f23317a13e2c49b523 upstream.
+
+If the kernel image extends across an alignment boundary, the
+existing code increases the KASLR offset by the size of the kernel
+image. The offset is masked after this adjustment. There are cases
+where, after masking, the kernel image still extends across the
+boundary. This eventually results in only a 2MB block getting
+mapped while creating the page tables, which leads to data aborts
+when accessing unmapped regions during the second relocation (with
+the KASLR offset) in __primary_switch. To fix this problem, round
+the kernel image size up to the swapper block size before adding it
+as the correction.
+
+For example, consider the case below, where the kernel image still
+crosses a 1GB alignment boundary after masking the offset; this is
+fixed by rounding up the kernel image size.
+
+SWAPPER_TABLE_SHIFT = 30
+Swapper using section maps with section size 2MB.
+CONFIG_PGTABLE_LEVELS = 3
+VA_BITS = 39
+
+_text : 0xffffff8008080000
+_end : 0xffffff800aa1b000
+offset : 0x1f35600000
+mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1)
+
+(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c
+(_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+
+offset after existing correction (before mask) = 0x1f37f9b000
+(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+(_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+
+offset (after mask) = 0x1f37e00000
+(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c
+(_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+
+new offset w/ rounding up = 0x1f38000000
+(_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+(_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d
+
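+For illustration only (not part of the upstream change itself), a minimal
+userspace C sketch that reproduces the numbers above; round_up() is
+open-coded here, assuming the usual power-of-two semantics of the kernel
+helper:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define VA_BITS              39
+  #define SWAPPER_TABLE_SHIFT  30
+  #define SZ_2M                0x200000ULL
+  #define SWAPPER_BLOCK_SIZE   SZ_2M   /* section maps, 4KB granule */
+
+  static uint64_t round_up(uint64_t x, uint64_t a)
+  {
+          return (x + a - 1) & ~(a - 1);
+  }
+
+  int main(void)
+  {
+          uint64_t text   = 0xffffff8008080000ULL;   /* _text */
+          uint64_t end    = 0xffffff800aa1b000ULL;   /* _end  */
+          uint64_t offset = 0x1f35600000ULL;
+          uint64_t mask   = ((1ULL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
+
+          /* old correction: add the raw image size, then mask */
+          uint64_t off_old = (offset + (end - text)) & mask;
+          /* new correction: round the image size up to a swapper block */
+          uint64_t off_new = (offset +
+                              round_up(end - text, SWAPPER_BLOCK_SIZE)) & mask;
+
+          printf("old: %llx vs %llx\n",
+                 (unsigned long long)((text + off_old) >> SWAPPER_TABLE_SHIFT),
+                 (unsigned long long)((end + off_old) >> SWAPPER_TABLE_SHIFT));
+          printf("new: %llx vs %llx\n",
+                 (unsigned long long)((text + off_new) >> SWAPPER_TABLE_SHIFT),
+                 (unsigned long long)((end + off_new) >> SWAPPER_TABLE_SHIFT));
+          return 0;
+  }
+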
+Fixes: f80fb3a3d508 ("arm64: add support for kernel ASLR")
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/kaslr.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys,
+ /*
+ * The kernel Image should not extend across a 1GB/32MB/512MB alignment
+ * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
+- * happens, increase the KASLR offset by the size of the kernel image.
++ * happens, increase the KASLR offset by the size of the kernel image
++ * rounded up by SWAPPER_BLOCK_SIZE.
+ */
+ if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
+- (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
+- offset = (offset + (u64)(_end - _text)) & mask;
++ (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
++ u64 kimg_sz = _end - _text;
++ offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
++ & mask;
++ }
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ /*
--- /dev/null
+From 6be3b6cce1e225f189b68b4e84fc711d19b4277b Mon Sep 17 00:00:00 2001
+From: Ryan Hsu <ryanhsu@qca.qualcomm.com>
+Date: Mon, 13 Mar 2017 15:49:03 -0700
+Subject: ath10k: fix incorrect wlan_mac_base in qca6174_regs
+
+From: Ryan Hsu <ryanhsu@qca.qualcomm.com>
+
+commit 6be3b6cce1e225f189b68b4e84fc711d19b4277b upstream.
+
+Commit ebee76f7fa46 ("ath10k: allow setting coverage class") inherited
+the design and the address offset from ath9k, but the address is not
+applicable to QCA6174. This leads to a random crash during the
+resume() operation, since the set_coverage_class op will be called from
+ieee80211_reconfig() on resume() (if WoW is not configured).
+
+Fix the incorrect address offset here to avoid the random crash.
+
+Verified on QCA6174/hw3.0 with firmware WLAN.RM.4.4-00022-QCARMSWPZ-2.
+
+kvalo: this also seems to fix a regression with firmware restart.
+
+Fixes: ebee76f7fa46 ("ath10k: allow setting coverage class")
+Signed-off-by: Ryan Hsu <ryanhsu@qca.qualcomm.com>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath10k/hw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath10k/hw.c
++++ b/drivers/net/wireless/ath/ath10k/hw.c
+@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs
+ .rtc_soc_base_address = 0x00000800,
+ .rtc_wmac_base_address = 0x00001000,
+ .soc_core_base_address = 0x0003a000,
+- .wlan_mac_base_address = 0x00020000,
++ .wlan_mac_base_address = 0x00010000,
+ .ce_wrapper_base_address = 0x00034000,
+ .ce0_base_address = 0x00034400,
+ .ce1_base_address = 0x00034800,
--- /dev/null
+From 5b52330bbfe63b3305765354d6046c9f7f89c011 Mon Sep 17 00:00:00 2001
+From: Paul Moore <paul@paul-moore.com>
+Date: Tue, 21 Mar 2017 11:26:35 -0400
+Subject: audit: fix auditd/kernel connection state tracking
+
+From: Paul Moore <paul@paul-moore.com>
+
+commit 5b52330bbfe63b3305765354d6046c9f7f89c011 upstream.
+
+What started as a rather straightforward race condition reported by
+Dmitry using the syzkaller fuzzer ended up revealing some major
+problems with how the audit subsystem managed its netlink sockets and
+its connection with the userspace audit daemon. Fixing this properly
+had quite the cascading effect and what we are left with is this rather
+large and complicated patch. My initial goal was to try and decompose
+this patch into multiple smaller patches, but the way these changes
+are intertwined makes it difficult to split them into
+meaningful pieces that don't break or somehow make things worse for
+the intermediate states.
+
+The patch makes a number of changes, but the most significant are
+highlighted below:
+
+* The auditd tracking variables, e.g. audit_sock, are now gone and
+replaced by an RCU/spinlock-protected variable, auditd_conn, which is
+a structure containing all of the auditd tracking information.
+
+* We no longer track the auditd sock directly; instead we track it
+via the network namespace in which it resides and we use the audit
+socket associated with that namespace. In spirit, this is what the
+code was trying to do prior to this patch (at least I think that is
+what the original authors intended), but it was done rather poorly
+and added a layer of obfuscation that only masked the underlying
+problems.
+
+* Big backlog queue cleanup, again. In v4.10 we made some pretty big
+changes to how the audit backlog queues work, here we haven't changed
+the queue design so much as cleaned up the implementation. Brought
+about by the locking changes, we've simplified kauditd_thread() quite
+a bit by consolidating the queue handling into a new helper function,
+kauditd_send_queue(), which allows us to eliminate a lot of very
+similar code and makes the looping logic in kauditd_thread() clearer.
+
+* All netlink messages sent to auditd are now sent via
+auditd_send_unicast_skb(). Other than just making sense, this makes
+the lock handling easier.
+
+* Change the audit_log_start() sleep behavior so that we never sleep
+on auditd events (unchanged) or if the caller is holding the
+audit_cmd_mutex (changed). Previously we didn't sleep if the caller
+was auditd or if the message type fell within a certain range; the
+type check was a poor attempt at doing what the cmd_mutex check now
+does. Richard Guy Briggs originally proposed not sleeping the
+cmd_mutex owner several years ago but his patch wasn't acceptable
+at the time. At least the idea lives on here.
+
+* A problem with the lost record counter has been resolved. Steve
+Grubb and I both happened to notice this problem and according to
+some quick testing by Steve, this problem goes back quite some time.
+It's largely a harmless problem, although it may have left some
+careful sysadmins quite puzzled.
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/audit.c | 643 +++++++++++++++++++++++++++++++++----------------------
+ kernel/audit.h | 9
+ kernel/auditsc.c | 6
+ 3 files changed, 401 insertions(+), 257 deletions(-)
+
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -54,6 +54,10 @@
+ #include <linux/kthread.h>
+ #include <linux/kernel.h>
+ #include <linux/syscalls.h>
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++#include <linux/mutex.h>
++#include <linux/gfp.h>
+
+ #include <linux/audit.h>
+
+@@ -90,13 +94,34 @@ static u32 audit_default;
+ /* If auditing cannot proceed, audit_failure selects what happens. */
+ static u32 audit_failure = AUDIT_FAIL_PRINTK;
+
+-/*
+- * If audit records are to be written to the netlink socket, audit_pid
+- * contains the pid of the auditd process and audit_nlk_portid contains
+- * the portid to use to send netlink messages to that process.
++/* private audit network namespace index */
++static unsigned int audit_net_id;
++
++/**
++ * struct audit_net - audit private network namespace data
++ * @sk: communication socket
+ */
+-int audit_pid;
+-static __u32 audit_nlk_portid;
++struct audit_net {
++ struct sock *sk;
++};
++
++/**
++ * struct auditd_connection - kernel/auditd connection state
++ * @pid: auditd PID
++ * @portid: netlink portid
++ * @net: the associated network namespace
++ * @lock: spinlock to protect write access
++ *
++ * Description:
++ * This struct is RCU protected; you must either hold the RCU lock for reading
++ * or the included spinlock for writing.
++ */
++static struct auditd_connection {
++ int pid;
++ u32 portid;
++ struct net *net;
++ spinlock_t lock;
++} auditd_conn;
+
+ /* If audit_rate_limit is non-zero, limit the rate of sending audit records
+ * to that number per second. This prevents DoS attacks, but results in
+@@ -123,10 +148,6 @@ u32 audit_sig_sid = 0;
+ */
+ static atomic_t audit_lost = ATOMIC_INIT(0);
+
+-/* The netlink socket. */
+-static struct sock *audit_sock;
+-static unsigned int audit_net_id;
+-
+ /* Hash for inode-based rules */
+ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
+
+@@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist);
+
+ /* queue msgs to send via kauditd_task */
+ static struct sk_buff_head audit_queue;
++static void kauditd_hold_skb(struct sk_buff *skb);
+ /* queue msgs due to temporary unicast send problems */
+ static struct sk_buff_head audit_retry_queue;
+ /* queue msgs waiting for new auditd connection */
+@@ -192,6 +214,43 @@ struct audit_reply {
+ struct sk_buff *skb;
+ };
+
++/**
++ * auditd_test_task - Check to see if a given task is an audit daemon
++ * @task: the task to check
++ *
++ * Description:
++ * Return 1 if the task is a registered audit daemon, 0 otherwise.
++ */
++int auditd_test_task(const struct task_struct *task)
++{
++ int rc;
++
++ rcu_read_lock();
++ rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
++ rcu_read_unlock();
++
++ return rc;
++}
++
++/**
++ * audit_get_sk - Return the audit socket for the given network namespace
++ * @net: the destination network namespace
++ *
++ * Description:
++ * Returns the sock pointer if valid, NULL otherwise. The caller must ensure
++ * that a reference is held for the network namespace while the sock is in use.
++ */
++static struct sock *audit_get_sk(const struct net *net)
++{
++ struct audit_net *aunet;
++
++ if (!net)
++ return NULL;
++
++ aunet = net_generic(net, audit_net_id);
++ return aunet->sk;
++}
++
+ static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
+ {
+ if (ab) {
+@@ -210,9 +269,7 @@ void audit_panic(const char *message)
+ pr_err("%s\n", message);
+ break;
+ case AUDIT_FAIL_PANIC:
+- /* test audit_pid since printk is always losey, why bother? */
+- if (audit_pid)
+- panic("audit: %s\n", message);
++ panic("audit: %s\n", message);
+ break;
+ }
+ }
+@@ -370,21 +427,87 @@ static int audit_set_failure(u32 state)
+ return audit_do_config_change("audit_failure", &audit_failure, state);
+ }
+
+-/*
+- * For one reason or another this nlh isn't getting delivered to the userspace
+- * audit daemon, just send it to printk.
++/**
++ * auditd_set - Set/Reset the auditd connection state
++ * @pid: auditd PID
++ * @portid: auditd netlink portid
++ * @net: auditd network namespace pointer
++ *
++ * Description:
++ * This function will obtain and drop network namespace references as
++ * necessary.
++ */
++static void auditd_set(int pid, u32 portid, struct net *net)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&auditd_conn.lock, flags);
++ auditd_conn.pid = pid;
++ auditd_conn.portid = portid;
++ if (auditd_conn.net)
++ put_net(auditd_conn.net);
++ if (net)
++ auditd_conn.net = get_net(net);
++ else
++ auditd_conn.net = NULL;
++ spin_unlock_irqrestore(&auditd_conn.lock, flags);
++}
++
++/**
++ * auditd_reset - Disconnect the auditd connection
++ *
++ * Description:
++ * Break the auditd/kauditd connection and move all the queued records into the
++ * hold queue in case auditd reconnects.
++ */
++static void auditd_reset(void)
++{
++ struct sk_buff *skb;
++
++ /* if it isn't already broken, break the connection */
++ rcu_read_lock();
++ if (auditd_conn.pid)
++ auditd_set(0, 0, NULL);
++ rcu_read_unlock();
++
++ /* flush all of the main and retry queues to the hold queue */
++ while ((skb = skb_dequeue(&audit_retry_queue)))
++ kauditd_hold_skb(skb);
++ while ((skb = skb_dequeue(&audit_queue)))
++ kauditd_hold_skb(skb);
++}
++
++/**
++ * kauditd_print_skb - Print the audit record to the ring buffer
++ * @skb: audit record
++ *
++ * Whatever the reason, this packet may not make it to the auditd connection
++ * so write it via printk so the information isn't completely lost.
+ */
+ static void kauditd_printk_skb(struct sk_buff *skb)
+ {
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ char *data = nlmsg_data(nlh);
+
+- if (nlh->nlmsg_type != AUDIT_EOE) {
+- if (printk_ratelimit())
+- pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
+- else
+- audit_log_lost("printk limit exceeded");
+- }
++ if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
++ pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
++}
++
++/**
++ * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
++ * @skb: audit record
++ *
++ * Description:
++ * This should only be used by the kauditd_thread when it fails to flush the
++ * hold queue.
++ */
++static void kauditd_rehold_skb(struct sk_buff *skb)
++{
++ /* put the record back in the queue at the same place */
++ skb_queue_head(&audit_hold_queue, skb);
++
++ /* fail the auditd connection */
++ auditd_reset();
+ }
+
+ /**
+@@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_b
+ /* we have no other options - drop the message */
+ audit_log_lost("kauditd hold queue overflow");
+ kfree_skb(skb);
++
++ /* fail the auditd connection */
++ auditd_reset();
+ }
+
+ /**
+@@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_
+ }
+
+ /**
+- * auditd_reset - Disconnect the auditd connection
++ * auditd_send_unicast_skb - Send a record via unicast to auditd
++ * @skb: audit record
+ *
+ * Description:
+- * Break the auditd/kauditd connection and move all the records in the retry
+- * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex
+- * must be held when calling this function.
++ * Send a skb to the audit daemon, returns positive/zero values on success and
++ * negative values on failure; in all cases the skb will be consumed by this
++ * function. If the send results in -ECONNREFUSED the connection with auditd
++ * will be reset. This function may sleep so callers should not hold any locks
++ * where this would cause a problem.
+ */
+-static void auditd_reset(void)
++static int auditd_send_unicast_skb(struct sk_buff *skb)
+ {
+- struct sk_buff *skb;
++ int rc;
++ u32 portid;
++ struct net *net;
++ struct sock *sk;
+
+- /* break the connection */
+- if (audit_sock) {
+- sock_put(audit_sock);
+- audit_sock = NULL;
++ /* NOTE: we can't call netlink_unicast while in the RCU section so
++ * take a reference to the network namespace and grab local
++ * copies of the namespace, the sock, and the portid; the
++ * namespace and sock aren't going to go away while we hold a
++ * reference and if the portid does become invalid after the RCU
++ * section netlink_unicast() should safely return an error */
++
++ rcu_read_lock();
++ if (!auditd_conn.pid) {
++ rcu_read_unlock();
++ rc = -ECONNREFUSED;
++ goto err;
+ }
+- audit_pid = 0;
+- audit_nlk_portid = 0;
++ net = auditd_conn.net;
++ get_net(net);
++ sk = audit_get_sk(net);
++ portid = auditd_conn.portid;
++ rcu_read_unlock();
+
+- /* flush all of the retry queue to the hold queue */
+- while ((skb = skb_dequeue(&audit_retry_queue)))
+- kauditd_hold_skb(skb);
++ rc = netlink_unicast(sk, skb, portid, 0);
++ put_net(net);
++ if (rc < 0)
++ goto err;
++
++ return rc;
++
++err:
++ if (rc == -ECONNREFUSED)
++ auditd_reset();
++ return rc;
+ }
+
+ /**
+- * kauditd_send_unicast_skb - Send a record via unicast to auditd
+- * @skb: audit record
++ * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
++ * @sk: the sending sock
++ * @portid: the netlink destination
++ * @queue: the skb queue to process
++ * @retry_limit: limit on number of netlink unicast failures
++ * @skb_hook: per-skb hook for additional processing
++ * @err_hook: hook called if the skb fails the netlink unicast send
++ *
++ * Description:
++ * Run through the given queue and attempt to send the audit records to auditd,
++ * returns zero on success, negative values on failure. It is up to the caller
++ * to ensure that the @sk is valid for the duration of this function.
++ *
+ */
+-static int kauditd_send_unicast_skb(struct sk_buff *skb)
++static int kauditd_send_queue(struct sock *sk, u32 portid,
++ struct sk_buff_head *queue,
++ unsigned int retry_limit,
++ void (*skb_hook)(struct sk_buff *skb),
++ void (*err_hook)(struct sk_buff *skb))
+ {
+- int rc;
++ int rc = 0;
++ struct sk_buff *skb;
++ static unsigned int failed = 0;
+
+- /* if we know nothing is connected, don't even try the netlink call */
+- if (!audit_pid)
+- return -ECONNREFUSED;
++ /* NOTE: kauditd_thread takes care of all our locking, we just use
++ * the netlink info passed to us (e.g. sk and portid) */
++
++ while ((skb = skb_dequeue(queue))) {
++ /* call the skb_hook for each skb we touch */
++ if (skb_hook)
++ (*skb_hook)(skb);
++
++ /* can we send to anyone via unicast? */
++ if (!sk) {
++ if (err_hook)
++ (*err_hook)(skb);
++ continue;
++ }
+
+- /* get an extra skb reference in case we fail to send */
+- skb_get(skb);
+- rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+- if (rc >= 0) {
+- consume_skb(skb);
+- rc = 0;
++ /* grab an extra skb reference in case of error */
++ skb_get(skb);
++ rc = netlink_unicast(sk, skb, portid, 0);
++ if (rc < 0) {
++ /* fatal failure for our queue flush attempt? */
++ if (++failed >= retry_limit ||
++ rc == -ECONNREFUSED || rc == -EPERM) {
++ /* yes - error processing for the queue */
++ sk = NULL;
++ if (err_hook)
++ (*err_hook)(skb);
++ if (!skb_hook)
++ goto out;
++ /* keep processing with the skb_hook */
++ continue;
++ } else
++ /* no - requeue to preserve ordering */
++ skb_queue_head(queue, skb);
++ } else {
++ /* it worked - drop the extra reference and continue */
++ consume_skb(skb);
++ failed = 0;
++ }
+ }
+
+- return rc;
++out:
++ return (rc >= 0 ? 0 : rc);
+ }
+
+ /*
+@@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(stru
+ * @skb: audit record
+ *
+ * Description:
+- * This function doesn't consume an skb as might be expected since it has to
+- * copy it anyways.
++ * Write a multicast message to anyone listening in the initial network
++ * namespace. This function doesn't consume an skb as might be expected since
++ * it has to copy it anyways.
+ */
+ static void kauditd_send_multicast_skb(struct sk_buff *skb)
+ {
+ struct sk_buff *copy;
+- struct audit_net *aunet = net_generic(&init_net, audit_net_id);
+- struct sock *sock = aunet->nlsk;
++ struct sock *sock = audit_get_sk(&init_net);
+ struct nlmsghdr *nlh;
+
++ /* NOTE: we are not taking an additional reference for init_net since
++ * we don't have to worry about it going away */
++
+ if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
+ return;
+
+@@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(s
+ }
+
+ /**
+- * kauditd_wake_condition - Return true when it is time to wake kauditd_thread
+- *
+- * Description:
+- * This function is for use by the wait_event_freezable() call in
+- * kauditd_thread().
++ * kauditd_thread - Worker thread to send audit records to userspace
++ * @dummy: unused
+ */
+-static int kauditd_wake_condition(void)
+-{
+- static int pid_last = 0;
+- int rc;
+- int pid = audit_pid;
+-
+- /* wake on new messages or a change in the connected auditd */
+- rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
+- if (rc)
+- pid_last = pid;
+-
+- return rc;
+-}
+-
+ static int kauditd_thread(void *dummy)
+ {
+ int rc;
+- int auditd = 0;
+- int reschedule = 0;
+- struct sk_buff *skb;
+- struct nlmsghdr *nlh;
++ u32 portid = 0;
++ struct net *net = NULL;
++ struct sock *sk = NULL;
+
+ #define UNICAST_RETRIES 5
+-#define AUDITD_BAD(x,y) \
+- ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
+-
+- /* NOTE: we do invalidate the auditd connection flag on any sending
+- * errors, but we only "restore" the connection flag at specific places
+- * in the loop in order to help ensure proper ordering of audit
+- * records */
+
+ set_freezable();
+ while (!kthread_should_stop()) {
+- /* NOTE: possible area for future improvement is to look at
+- * the hold and retry queues, since only this thread
+- * has access to these queues we might be able to do
+- * our own queuing and skip some/all of the locking */
+-
+- /* NOTE: it might be a fun experiment to split the hold and
+- * retry queue handling to another thread, but the
+- * synchronization issues and other overhead might kill
+- * any performance gains */
++ /* NOTE: see the lock comments in auditd_send_unicast_skb() */
++ rcu_read_lock();
++ if (!auditd_conn.pid) {
++ rcu_read_unlock();
++ goto main_queue;
++ }
++ net = auditd_conn.net;
++ get_net(net);
++ sk = audit_get_sk(net);
++ portid = auditd_conn.portid;
++ rcu_read_unlock();
+
+ /* attempt to flush the hold queue */
+- while (auditd && (skb = skb_dequeue(&audit_hold_queue))) {
+- rc = kauditd_send_unicast_skb(skb);
+- if (rc) {
+- /* requeue to the same spot */
+- skb_queue_head(&audit_hold_queue, skb);
+-
+- auditd = 0;
+- if (AUDITD_BAD(rc, reschedule)) {
+- mutex_lock(&audit_cmd_mutex);
+- auditd_reset();
+- mutex_unlock(&audit_cmd_mutex);
+- reschedule = 0;
+- }
+- } else
+- /* we were able to send successfully */
+- reschedule = 0;
++ rc = kauditd_send_queue(sk, portid,
++ &audit_hold_queue, UNICAST_RETRIES,
++ NULL, kauditd_rehold_skb);
++ if (rc < 0) {
++ sk = NULL;
++ goto main_queue;
+ }
+
+ /* attempt to flush the retry queue */
+- while (auditd && (skb = skb_dequeue(&audit_retry_queue))) {
+- rc = kauditd_send_unicast_skb(skb);
+- if (rc) {
+- auditd = 0;
+- if (AUDITD_BAD(rc, reschedule)) {
+- kauditd_hold_skb(skb);
+- mutex_lock(&audit_cmd_mutex);
+- auditd_reset();
+- mutex_unlock(&audit_cmd_mutex);
+- reschedule = 0;
+- } else
+- /* temporary problem (we hope), queue
+- * to the same spot and retry */
+- skb_queue_head(&audit_retry_queue, skb);
+- } else
+- /* we were able to send successfully */
+- reschedule = 0;
+- }
+-
+- /* standard queue processing, try to be as quick as possible */
+-quick_loop:
+- skb = skb_dequeue(&audit_queue);
+- if (skb) {
+- /* setup the netlink header, see the comments in
+- * kauditd_send_multicast_skb() for length quirks */
+- nlh = nlmsg_hdr(skb);
+- nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
+-
+- /* attempt to send to any multicast listeners */
+- kauditd_send_multicast_skb(skb);
+-
+- /* attempt to send to auditd, queue on failure */
+- if (auditd) {
+- rc = kauditd_send_unicast_skb(skb);
+- if (rc) {
+- auditd = 0;
+- if (AUDITD_BAD(rc, reschedule)) {
+- mutex_lock(&audit_cmd_mutex);
+- auditd_reset();
+- mutex_unlock(&audit_cmd_mutex);
+- reschedule = 0;
+- }
+-
+- /* move to the retry queue */
+- kauditd_retry_skb(skb);
+- } else
+- /* everything is working so go fast! */
+- goto quick_loop;
+- } else if (reschedule)
+- /* we are currently having problems, move to
+- * the retry queue */
+- kauditd_retry_skb(skb);
+- else
+- /* dump the message via printk and hold it */
+- kauditd_hold_skb(skb);
+- } else {
+- /* we have flushed the backlog so wake everyone */
+- wake_up(&audit_backlog_wait);
+-
+- /* if everything is okay with auditd (if present), go
+- * to sleep until there is something new in the queue
+- * or we have a change in the connected auditd;
+- * otherwise simply reschedule to give things a chance
+- * to recover */
+- if (reschedule) {
+- set_current_state(TASK_INTERRUPTIBLE);
+- schedule();
+- } else
+- wait_event_freezable(kauditd_wait,
+- kauditd_wake_condition());
+-
+- /* update the auditd connection status */
+- auditd = (audit_pid ? 1 : 0);
+- }
++ rc = kauditd_send_queue(sk, portid,
++ &audit_retry_queue, UNICAST_RETRIES,
++ NULL, kauditd_hold_skb);
++ if (rc < 0) {
++ sk = NULL;
++ goto main_queue;
++ }
++
++main_queue:
++ /* process the main queue - do the multicast send and attempt
++ * unicast, dump failed record sends to the retry queue; if
++ * sk == NULL due to previous failures we will just do the
++ * multicast send and move the record to the retry queue */
++ kauditd_send_queue(sk, portid, &audit_queue, 1,
++ kauditd_send_multicast_skb,
++ kauditd_retry_skb);
++
++ /* drop our netns reference, no auditd sends past this line */
++ if (net) {
++ put_net(net);
++ net = NULL;
++ }
++ sk = NULL;
++
++ /* we have processed all the queues so wake everyone */
++ wake_up(&audit_backlog_wait);
++
++ /* NOTE: we want to wake up if there is anything on the queue,
++ * regardless of if an auditd is connected, as we need to
++ * do the multicast send and rotate records from the
++ * main queue to the retry/hold queues */
++ wait_event_freezable(kauditd_wait,
++ (skb_queue_len(&audit_queue) ? 1 : 0));
+ }
+
+ return 0;
+@@ -678,17 +804,16 @@ int audit_send_list(void *_dest)
+ {
+ struct audit_netlink_list *dest = _dest;
+ struct sk_buff *skb;
+- struct net *net = dest->net;
+- struct audit_net *aunet = net_generic(net, audit_net_id);
++ struct sock *sk = audit_get_sk(dest->net);
+
+ /* wait for parent to finish and send an ACK */
+ mutex_lock(&audit_cmd_mutex);
+ mutex_unlock(&audit_cmd_mutex);
+
+ while ((skb = __skb_dequeue(&dest->q)) != NULL)
+- netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
++ netlink_unicast(sk, skb, dest->portid, 0);
+
+- put_net(net);
++ put_net(dest->net);
+ kfree(dest);
+
+ return 0;
+@@ -722,16 +847,15 @@ out_kfree_skb:
+ static int audit_send_reply_thread(void *arg)
+ {
+ struct audit_reply *reply = (struct audit_reply *)arg;
+- struct net *net = reply->net;
+- struct audit_net *aunet = net_generic(net, audit_net_id);
++ struct sock *sk = audit_get_sk(reply->net);
+
+ mutex_lock(&audit_cmd_mutex);
+ mutex_unlock(&audit_cmd_mutex);
+
+ /* Ignore failure. It'll only happen if the sender goes away,
+ because our timeout is set to infinite. */
+- netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+- put_net(net);
++ netlink_unicast(sk, reply->skb, reply->portid, 0);
++ put_net(reply->net);
+ kfree(reply);
+ return 0;
+ }
+@@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_b
+
+ static int audit_replace(pid_t pid)
+ {
+- struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
+- &pid, sizeof(pid));
++ struct sk_buff *skb;
+
++ skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
+ if (!skb)
+ return -ENOMEM;
+- return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
++ return auditd_send_unicast_skb(skb);
+ }
+
+ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_b
+ memset(&s, 0, sizeof(s));
+ s.enabled = audit_enabled;
+ s.failure = audit_failure;
+- s.pid = audit_pid;
++ rcu_read_lock();
++ s.pid = auditd_conn.pid;
++ rcu_read_unlock();
+ s.rate_limit = audit_rate_limit;
+ s.backlog_limit = audit_backlog_limit;
+ s.lost = atomic_read(&audit_lost);
+@@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_b
+ * from the initial pid namespace, but something
+ * to keep in mind if this changes */
+ int new_pid = s.pid;
++ pid_t auditd_pid;
+ pid_t requesting_pid = task_tgid_vnr(current);
+
+- if ((!new_pid) && (requesting_pid != audit_pid)) {
+- audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
++ /* test the auditd connection */
++ audit_replace(requesting_pid);
++
++ rcu_read_lock();
++ auditd_pid = auditd_conn.pid;
++ /* only the current auditd can unregister itself */
++ if ((!new_pid) && (requesting_pid != auditd_pid)) {
++ rcu_read_unlock();
++ audit_log_config_change("audit_pid", new_pid,
++ auditd_pid, 0);
+ return -EACCES;
+ }
+- if (audit_pid && new_pid &&
+- audit_replace(requesting_pid) != -ECONNREFUSED) {
+- audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
++ /* replacing a healthy auditd is not allowed */
++ if (auditd_pid && new_pid) {
++ rcu_read_unlock();
++ audit_log_config_change("audit_pid", new_pid,
++ auditd_pid, 0);
+ return -EEXIST;
+ }
++ rcu_read_unlock();
++
+ if (audit_enabled != AUDIT_OFF)
+- audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
++ audit_log_config_change("audit_pid", new_pid,
++ auditd_pid, 1);
++
+ if (new_pid) {
+- if (audit_sock)
+- sock_put(audit_sock);
+- audit_pid = new_pid;
+- audit_nlk_portid = NETLINK_CB(skb).portid;
+- sock_hold(skb->sk);
+- audit_sock = skb->sk;
+- } else {
++ /* register a new auditd connection */
++ auditd_set(new_pid,
++ NETLINK_CB(skb).portid,
++ sock_net(NETLINK_CB(skb).sk));
++ /* try to process any backlog */
++ wake_up_interruptible(&kauditd_wait);
++ } else
++ /* unregister the auditd connection */
+ auditd_reset();
+- }
+- wake_up_interruptible(&kauditd_wait);
+ }
+ if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
+ err = audit_set_rate_limit(s.rate_limit);
+@@ -1084,7 +1224,6 @@ static int audit_receive_msg(struct sk_b
+ if (err)
+ break;
+ }
+- mutex_unlock(&audit_cmd_mutex);
+ audit_log_common_recv_msg(&ab, msg_type);
+ if (msg_type != AUDIT_USER_TTY)
+ audit_log_format(ab, " msg='%.*s'",
+@@ -1102,7 +1241,6 @@ static int audit_receive_msg(struct sk_b
+ }
+ audit_set_portid(ab, NETLINK_CB(skb).portid);
+ audit_log_end(ab);
+- mutex_lock(&audit_cmd_mutex);
+ }
+ break;
+ case AUDIT_ADD_RULE:
+@@ -1292,26 +1430,26 @@ static int __net_init audit_net_init(str
+
+ struct audit_net *aunet = net_generic(net, audit_net_id);
+
+- aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
+- if (aunet->nlsk == NULL) {
++ aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
++ if (aunet->sk == NULL) {
+ audit_panic("cannot initialize netlink socket in namespace");
+ return -ENOMEM;
+ }
+- aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
++ aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
++
+ return 0;
+ }
+
+ static void __net_exit audit_net_exit(struct net *net)
+ {
+ struct audit_net *aunet = net_generic(net, audit_net_id);
+- struct sock *sock = aunet->nlsk;
+- mutex_lock(&audit_cmd_mutex);
+- if (sock == audit_sock)
++
++ rcu_read_lock();
++ if (net == auditd_conn.net)
+ auditd_reset();
+- mutex_unlock(&audit_cmd_mutex);
++ rcu_read_unlock();
+
+- netlink_kernel_release(sock);
+- aunet->nlsk = NULL;
++ netlink_kernel_release(aunet->sk);
+ }
+
+ static struct pernet_operations audit_net_ops __net_initdata = {
+@@ -1329,20 +1467,24 @@ static int __init audit_init(void)
+ if (audit_initialized == AUDIT_DISABLED)
+ return 0;
+
+- pr_info("initializing netlink subsys (%s)\n",
+- audit_default ? "enabled" : "disabled");
+- register_pernet_subsys(&audit_net_ops);
++ memset(&auditd_conn, 0, sizeof(auditd_conn));
++ spin_lock_init(&auditd_conn.lock);
+
+ skb_queue_head_init(&audit_queue);
+ skb_queue_head_init(&audit_retry_queue);
+ skb_queue_head_init(&audit_hold_queue);
+- audit_initialized = AUDIT_INITIALIZED;
+- audit_enabled = audit_default;
+- audit_ever_enabled |= !!audit_default;
+
+ for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
+ INIT_LIST_HEAD(&audit_inode_hash[i]);
+
++ pr_info("initializing netlink subsys (%s)\n",
++ audit_default ? "enabled" : "disabled");
++ register_pernet_subsys(&audit_net_ops);
++
++ audit_initialized = AUDIT_INITIALIZED;
++ audit_enabled = audit_default;
++ audit_ever_enabled |= !!audit_default;
++
+ kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
+ if (IS_ERR(kauditd_task)) {
+ int err = PTR_ERR(kauditd_task);
+@@ -1511,20 +1653,16 @@ struct audit_buffer *audit_log_start(str
+ if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
+ return NULL;
+
+- /* don't ever fail/sleep on these two conditions:
++ /* NOTE: don't ever fail/sleep on these two conditions:
+ * 1. auditd generated record - since we need auditd to drain the
+ * queue; also, when we are checking for auditd, compare PIDs using
+ * task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
+ * using a PID anchored in the caller's namespace
+- * 2. audit command message - record types 1000 through 1099 inclusive
+- * are command messages/records used to manage the kernel subsystem
+- * and the audit userspace, blocking on these messages could cause
+- * problems under load so don't do it (note: not all of these
+- * command types are valid as record types, but it is quicker to
+- * just check two ints than a series of ints in a if/switch stmt) */
+- if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
+- (type >= 1000 && type <= 1099))) {
+- long sleep_time = audit_backlog_wait_time;
++ * 2. generator holding the audit_cmd_mutex - we don't want to block
++ * while holding the mutex */
++ if (!(auditd_test_task(current) ||
++ (current == __mutex_owner(&audit_cmd_mutex)))) {
++ long stime = audit_backlog_wait_time;
+
+ while (audit_backlog_limit &&
+ (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
+@@ -1533,14 +1671,13 @@ struct audit_buffer *audit_log_start(str
+
+ /* sleep if we are allowed and we haven't exhausted our
+ * backlog wait limit */
+- if ((gfp_mask & __GFP_DIRECT_RECLAIM) &&
+- (sleep_time > 0)) {
++ if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(&audit_backlog_wait,
+ &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- sleep_time = schedule_timeout(sleep_time);
++ stime = schedule_timeout(stime);
+ remove_wait_queue(&audit_backlog_wait, &wait);
+ } else {
+ if (audit_rate_check() && printk_ratelimit())
+@@ -2119,15 +2256,27 @@ out:
+ */
+ void audit_log_end(struct audit_buffer *ab)
+ {
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++
+ if (!ab)
+ return;
+- if (!audit_rate_check()) {
+- audit_log_lost("rate limit exceeded");
+- } else {
+- skb_queue_tail(&audit_queue, ab->skb);
+- wake_up_interruptible(&kauditd_wait);
++
++ if (audit_rate_check()) {
++ skb = ab->skb;
+ ab->skb = NULL;
+- }
++
++ /* setup the netlink header, see the comments in
++ * kauditd_send_multicast_skb() for length quirks */
++ nlh = nlmsg_hdr(skb);
++ nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
++
++ /* queue the netlink packet and poke the kauditd thread */
++ skb_queue_tail(&audit_queue, skb);
++ wake_up_interruptible(&kauditd_wait);
++ } else
++ audit_log_lost("rate limit exceeded");
++
+ audit_buffer_free(ab);
+ }
+
+--- a/kernel/audit.h
++++ b/kernel/audit.h
+@@ -215,7 +215,7 @@ extern void audit_log_name(struct audit_
+ struct audit_names *n, const struct path *path,
+ int record_num, int *call_panic);
+
+-extern int audit_pid;
++extern int auditd_test_task(const struct task_struct *task);
+
+ #define AUDIT_INODE_BUCKETS 32
+ extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
+@@ -247,10 +247,6 @@ struct audit_netlink_list {
+
+ int audit_send_list(void *);
+
+-struct audit_net {
+- struct sock *nlsk;
+-};
+-
+ extern int selinux_audit_rule_update(void);
+
+ extern struct mutex audit_filter_mutex;
+@@ -337,8 +333,7 @@ extern int audit_filter(int msgtype, uns
+ extern int __audit_signal_info(int sig, struct task_struct *t);
+ static inline int audit_signal_info(int sig, struct task_struct *t)
+ {
+- if (unlikely((audit_pid && t->tgid == audit_pid) ||
+- (audit_signals && !audit_dummy_context())))
++ if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
+ return __audit_signal_info(sig, t);
+ return 0;
+ }
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -762,7 +762,7 @@ static enum audit_state audit_filter_sys
+ struct audit_entry *e;
+ enum audit_state state;
+
+- if (audit_pid && tsk->tgid == audit_pid)
++ if (auditd_test_task(tsk))
+ return AUDIT_DISABLED;
+
+ rcu_read_lock();
+@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_str
+ {
+ struct audit_names *n;
+
+- if (audit_pid && tsk->tgid == audit_pid)
++ if (auditd_test_task(tsk))
+ return;
+
+ rcu_read_lock();
+@@ -2251,7 +2251,7 @@ int __audit_signal_info(int sig, struct
+ struct audit_context *ctx = tsk->audit_context;
+ kuid_t uid = current_uid(), t_uid = task_uid(t);
+
+- if (audit_pid && t->tgid == audit_pid) {
++ if (auditd_test_task(t)) {
+ if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
+ audit_sig_pid = task_tgid_nr(tsk);
+ if (uid_valid(tsk->loginuid))
--- /dev/null
+From abda288bb207e5c681306299126af8c022709c18 Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Sun, 19 Feb 2017 16:33:35 -0800
+Subject: auxdisplay: img-ascii-lcd: add missing sentinel entry in img_ascii_lcd_matches
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit abda288bb207e5c681306299126af8c022709c18 upstream.
+
+The OF device table must be terminated, otherwise we'll be walking past it
+and into areas unknown.
+
+Fixes: 0cad855fbd08 ("auxdisplay: img-ascii-lcd: driver for simple ASCII...")
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Tested-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/auxdisplay/img-ascii-lcd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/auxdisplay/img-ascii-lcd.c
++++ b/drivers/auxdisplay/img-ascii-lcd.c
+@@ -218,6 +218,7 @@ static const struct of_device_id img_asc
+ { .compatible = "img,boston-lcd", .data = &boston_config },
+ { .compatible = "mti,malta-lcd", .data = &malta_config },
+ { .compatible = "mti,sead3-lcd", .data = &sead3_config },
++ { /* sentinel */ }
+ };
+
+ /**
--- /dev/null
+From 95a49603707d982b25d17c5b70e220a05556a2f9 Mon Sep 17 00:00:00 2001
+From: Ming Lei <tom.leiming@gmail.com>
+Date: Wed, 22 Mar 2017 10:14:43 +0800
+Subject: blk-mq: don't complete un-started request in timeout handler
+
+From: Ming Lei <tom.leiming@gmail.com>
+
+commit 95a49603707d982b25d17c5b70e220a05556a2f9 upstream.
+
+When iterating busy requests in the timeout handler,
+if the STARTED flag of a request isn't set, that means
+the request is still being processed in the block layer or driver,
+and hasn't been submitted to hardware yet.
+
+In the current implementation of blk_mq_check_expired(),
+if the request queue becomes dying, un-started requests are
+handled as being completed/freed immediately. This is
+wrong, and can cause rq corruption or double allocation [1][2]
+when doing I/O and removing & resetting an NVMe device at the same time.
+
+This patch fixes several issues reported by Yi Zhang.
+
+[1]. oops log 1
+[ 581.789754] ------------[ cut here ]------------
+[ 581.789758] kernel BUG at block/blk-mq.c:374!
+[ 581.789760] invalid opcode: 0000 [#1] SMP
+[ 581.789761] Modules linked in: vfat fat ipmi_ssif intel_rapl sb_edac
+edac_core x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm nvme
+irqbypass crct10dif_pclmul nvme_core crc32_pclmul ghash_clmulni_intel
+intel_cstate ipmi_si mei_me ipmi_devintf intel_uncore sg ipmi_msghandler
+intel_rapl_perf iTCO_wdt mei iTCO_vendor_support mxm_wmi lpc_ich dcdbas shpchp
+pcspkr acpi_power_meter wmi nfsd auth_rpcgss nfs_acl lockd dm_multipath grace
+sunrpc ip_tables xfs libcrc32c sd_mod mgag200 i2c_algo_bit drm_kms_helper
+syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm ahci libahci
+crc32c_intel tg3 libata megaraid_sas i2c_core ptp fjes pps_core dm_mirror
+dm_region_hash dm_log dm_mod
+[ 581.789796] CPU: 1 PID: 1617 Comm: kworker/1:1H Not tainted 4.10.0.bz1420297+ #4
+[ 581.789797] Hardware name: Dell Inc. PowerEdge R730xd/072T6D, BIOS 2.2.5 09/06/2016
+[ 581.789804] Workqueue: kblockd blk_mq_timeout_work
+[ 581.789806] task: ffff8804721c8000 task.stack: ffffc90006ee4000
+[ 581.789809] RIP: 0010:blk_mq_end_request+0x58/0x70
+[ 581.789810] RSP: 0018:ffffc90006ee7d50 EFLAGS: 00010202
+[ 581.789811] RAX: 0000000000000001 RBX: ffff8802e4195340 RCX: ffff88028e2f4b88
+[ 581.789812] RDX: 0000000000001000 RSI: 0000000000001000 RDI: 0000000000000000
+[ 581.789813] RBP: ffffc90006ee7d60 R08: 0000000000000003 R09: ffff88028e2f4b00
+[ 581.789814] R10: 0000000000001000 R11: 0000000000000001 R12: 00000000fffffffb
+[ 581.789815] R13: ffff88042abe5780 R14: 000000000000002d R15: ffff88046fbdff80
+[ 581.789817] FS: 0000000000000000(0000) GS:ffff88047fc00000(0000) knlGS:0000000000000000
+[ 581.789818] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 581.789819] CR2: 00007f64f403a008 CR3: 000000014d078000 CR4: 00000000001406e0
+[ 581.789820] Call Trace:
+[ 581.789825] blk_mq_check_expired+0x76/0x80
+[ 581.789828] bt_iter+0x45/0x50
+[ 581.789830] blk_mq_queue_tag_busy_iter+0xdd/0x1f0
+[ 581.789832] ? blk_mq_rq_timed_out+0x70/0x70
+[ 581.789833] ? blk_mq_rq_timed_out+0x70/0x70
+[ 581.789840] ? __switch_to+0x140/0x450
+[ 581.789841] blk_mq_timeout_work+0x88/0x170
+[ 581.789845] process_one_work+0x165/0x410
+[ 581.789847] worker_thread+0x137/0x4c0
+[ 581.789851] kthread+0x101/0x140
+[ 581.789853] ? rescuer_thread+0x3b0/0x3b0
+[ 581.789855] ? kthread_park+0x90/0x90
+[ 581.789860] ret_from_fork+0x2c/0x40
+[ 581.789861] Code: 48 85 c0 74 0d 44 89 e6 48 89 df ff d0 5b 41 5c 5d c3 48
+8b bb 70 01 00 00 48 85 ff 75 0f 48 89 df e8 7d f0 ff ff 5b 41 5c 5d c3 <0f>
+0b e8 71 f0 ff ff 90 eb e9 0f 1f 40 00 66 2e 0f 1f 84 00 00
+[ 581.789882] RIP: blk_mq_end_request+0x58/0x70 RSP: ffffc90006ee7d50
+[ 581.789889] ---[ end trace bcaf03d9a14a0a70 ]---
+
+[2]. oops log2
+[ 6984.857362] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
+[ 6984.857372] IP: nvme_queue_rq+0x6e6/0x8cd [nvme]
+[ 6984.857373] PGD 0
+[ 6984.857374]
+[ 6984.857376] Oops: 0000 [#1] SMP
+[ 6984.857379] Modules linked in: ipmi_ssif vfat fat intel_rapl sb_edac
+edac_core x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm
+irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel ipmi_si iTCO_wdt
+iTCO_vendor_support mxm_wmi ipmi_devintf intel_cstate sg dcdbas intel_uncore
+mei_me intel_rapl_perf mei pcspkr lpc_ich ipmi_msghandler shpchp
+acpi_power_meter wmi nfsd auth_rpcgss dm_multipath nfs_acl lockd grace sunrpc
+ip_tables xfs libcrc32c sd_mod mgag200 i2c_algo_bit drm_kms_helper syscopyarea
+sysfillrect crc32c_intel sysimgblt fb_sys_fops ttm nvme drm nvme_core ahci
+libahci i2c_core tg3 libata ptp megaraid_sas pps_core fjes dm_mirror
+dm_region_hash dm_log dm_mod
+[ 6984.857416] CPU: 7 PID: 1635 Comm: kworker/7:1H Not tainted
+4.10.0-2.el7.bz1420297.x86_64 #1
+[ 6984.857417] Hardware name: Dell Inc. PowerEdge R730xd/072T6D, BIOS 2.2.5 09/06/2016
+[ 6984.857427] Workqueue: kblockd blk_mq_run_work_fn
+[ 6984.857429] task: ffff880476e3da00 task.stack: ffffc90002e90000
+[ 6984.857432] RIP: 0010:nvme_queue_rq+0x6e6/0x8cd [nvme]
+[ 6984.857433] RSP: 0018:ffffc90002e93c50 EFLAGS: 00010246
+[ 6984.857434] RAX: 0000000000000000 RBX: ffff880275646600 RCX: 0000000000001000
+[ 6984.857435] RDX: 0000000000000fff RSI: 00000002fba2a000 RDI: ffff8804734e6950
+[ 6984.857436] RBP: ffffc90002e93d30 R08: 0000000000002000 R09: 0000000000001000
+[ 6984.857437] R10: 0000000000001000 R11: 0000000000000000 R12: ffff8804741d8000
+[ 6984.857438] R13: 0000000000000040 R14: ffff880475649f80 R15: ffff8804734e6780
+[ 6984.857439] FS: 0000000000000000(0000) GS:ffff88047fcc0000(0000) knlGS:0000000000000000
+[ 6984.857440] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 6984.857442] CR2: 0000000000000010 CR3: 0000000001c09000 CR4: 00000000001406e0
+[ 6984.857443] Call Trace:
+[ 6984.857451] ? mempool_free+0x2b/0x80
+[ 6984.857455] ? bio_free+0x4e/0x60
+[ 6984.857459] blk_mq_dispatch_rq_list+0xf5/0x230
+[ 6984.857462] blk_mq_process_rq_list+0x133/0x170
+[ 6984.857465] __blk_mq_run_hw_queue+0x8c/0xa0
+[ 6984.857467] blk_mq_run_work_fn+0x12/0x20
+[ 6984.857473] process_one_work+0x165/0x410
+[ 6984.857475] worker_thread+0x137/0x4c0
+[ 6984.857478] kthread+0x101/0x140
+[ 6984.857480] ? rescuer_thread+0x3b0/0x3b0
+[ 6984.857481] ? kthread_park+0x90/0x90
+[ 6984.857489] ret_from_fork+0x2c/0x40
+[ 6984.857490] Code: 8b bd 70 ff ff ff 89 95 50 ff ff ff 89 8d 58 ff ff ff 44
+89 95 60 ff ff ff e8 b7 dd 12 e1 8b 95 50 ff ff ff 48 89 85 68 ff ff ff <4c>
+8b 48 10 44 8b 58 18 8b 8d 58 ff ff ff 44 8b 95 60 ff ff ff
+[ 6984.857511] RIP: nvme_queue_rq+0x6e6/0x8cd [nvme] RSP: ffffc90002e93c50
+[ 6984.857512] CR2: 0000000000000010
+[ 6984.895359] ---[ end trace 2d7ceb528432bf83 ]---
+
+Reported-by: Yi Zhang <yizhan@redhat.com>
+Tested-by: Yi Zhang <yizhan@redhat.com>
+Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -678,17 +678,8 @@ static void blk_mq_check_expired(struct
+ {
+ struct blk_mq_timeout_data *data = priv;
+
+- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+- /*
+- * If a request wasn't started before the queue was
+- * marked dying, kill it here or it'll go unnoticed.
+- */
+- if (unlikely(blk_queue_dying(rq->q))) {
+- rq->errors = -EIO;
+- blk_mq_end_request(rq, rq->errors);
+- }
++ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ return;
+- }
+
+ if (time_after_eq(jiffies, rq->deadline)) {
+ if (!blk_mark_rq_complete(rq))
--- /dev/null
+From a05d4fd9176003e0c1f9c3d083f4dac19fd346ab Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 14 Mar 2017 19:25:56 -0400
+Subject: cgroup, net_cls: iterate the fds of only the tasks which are being migrated
+
+From: Tejun Heo <tj@kernel.org>
+
+commit a05d4fd9176003e0c1f9c3d083f4dac19fd346ab upstream.
+
+The net_cls controller controls the classid field of each socket which
+is associated with the cgroup. Because the classid is a per-socket
+attribute, when a task migrates to another cgroup or the configured
+classid of the cgroup changes, the controller needs to walk all
+sockets and update the classid value, which was implemented by
+3b13758f51de ("cgroups: Allow dynamically changing net_classid").
+
+While the approach is not scalable, migrating tasks which have a lot
+of fds attached to them is rare and the cost is borne by the ones
+initiating the operations. However, for simplicity, both the
+migration and classid config change paths call update_classid() which
+scans all fds of all tasks in the target css. This is overkill for
+the migration path which only needs to cover a much smaller subset of
+tasks which are actually getting migrated in.
+
+On cgroup v1, this can lead to unexpected scalability issues when one
+tries to migrate a task or process into a net_cls cgroup which already
+contains a lot of fds. Even if the migration target doesn't have many
+fds to be scanned, update_classid() ends up scanning all fds in the
+target cgroup, which can be extremely numerous.
+
+Unfortunately, on cgroup v2 which doesn't use net_cls, the problem is
+even worse. Before bfc2cf6f61fc ("cgroup: call subsys->*attach() only
+for subsystems which are actually affected by migration"), cgroup core
+would call the ->css_attach callback even for controllers which don't
+see actual migration to a different css.
+
+As net_cls is always disabled but still mounted on cgroup v2, whenever
+a process is migrated on the cgroup v2 hierarchy, net_cls sees
+identity migration from root to root and cgroup core used to call
+the ->css_attach callback for those. The net_cls ->css_attach ends up
+calling update_classid() on the root net_cls css to which all
+processes on the system belong, as the controller isn't used. This
+makes any cgroup v2 migration O(total_number_of_fds_on_the_system)
+which is horrible and easily leads to noticeable stalls triggering RCU
+stall warnings and so on.
+
+The worst symptom is already fixed in upstream by bfc2cf6f61fc
+("cgroup: call subsys->*attach() only for subsystems which are
+actually affected by migration"); however, backporting that commit is
+too invasive and we want to avoid other cases too.
+
+This patch updates net_cls's cgrp_attach() to iterate fds of only the
+processes which are actually getting migrated. This removes the
+surprising migration cost which is dependent on the total number of
+fds in the target cgroup. As this leaves write_classid() the only
+user of update_classid(), open-code the helper into write_classid().
+
+Reported-by: David Goode <dgoode@fb.com>
+Fixes: 3b13758f51de ("cgroups: Allow dynamically changing net_classid")
+Cc: Nina Schiff <ninasc@fb.com>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/netclassid_cgroup.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/net/core/netclassid_cgroup.c
++++ b/net/core/netclassid_cgroup.c
+@@ -69,27 +69,17 @@ static int update_classid_sock(const voi
+ return 0;
+ }
+
+-static void update_classid(struct cgroup_subsys_state *css, void *v)
++static void cgrp_attach(struct cgroup_taskset *tset)
+ {
+- struct css_task_iter it;
++ struct cgroup_subsys_state *css;
+ struct task_struct *p;
+
+- css_task_iter_start(css, &it);
+- while ((p = css_task_iter_next(&it))) {
++ cgroup_taskset_for_each(p, css, tset) {
+ task_lock(p);
+- iterate_fd(p->files, 0, update_classid_sock, v);
++ iterate_fd(p->files, 0, update_classid_sock,
++ (void *)(unsigned long)css_cls_state(css)->classid);
+ task_unlock(p);
+ }
+- css_task_iter_end(&it);
+-}
+-
+-static void cgrp_attach(struct cgroup_taskset *tset)
+-{
+- struct cgroup_subsys_state *css;
+-
+- cgroup_taskset_first(tset, &css);
+- update_classid(css,
+- (void *)(unsigned long)css_cls_state(css)->classid);
+ }
+
+ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
+@@ -101,12 +91,22 @@ static int write_classid(struct cgroup_s
+ u64 value)
+ {
+ struct cgroup_cls_state *cs = css_cls_state(css);
++ struct css_task_iter it;
++ struct task_struct *p;
+
+ cgroup_sk_alloc_disable();
+
+ cs->classid = (u32)value;
+
+- update_classid(css, (void *)(unsigned long)cs->classid);
++ css_task_iter_start(css, &it);
++ while ((p = css_task_iter_next(&it))) {
++ task_lock(p);
++ iterate_fd(p->files, 0, update_classid_sock,
++ (void *)(unsigned long)cs->classid);
++ task_unlock(p);
++ }
++ css_task_iter_end(&it);
++
+ return 0;
+ }
+
--- /dev/null
+From ac8616e4c81dded650dfade49a7da283565d37ce Mon Sep 17 00:00:00 2001
+From: Chen-Yu Tsai <wens@csie.org>
+Date: Tue, 14 Feb 2017 11:35:22 +0800
+Subject: clk: sunxi-ng: mp: Adjust parent rate for pre-dividers
+
+From: Chen-Yu Tsai <wens@csie.org>
+
+commit ac8616e4c81dded650dfade49a7da283565d37ce upstream.
+
+The MP style clocks support a mux with pre-dividers. While the driver
+correctly accounted for them in the .determine_rate callback, it did
+not in the .recalc_rate and .set_rate callbacks.
+
+This means that when calculating the factors in the .set_rate callback,
+they would be off by a factor of the active pre-divider. The same goes
+for reading back the clock rate after it is set.
+
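+For illustration only, a conceptual sketch (not the driver code, and with
+the register encoding of the M and P factors simplified) of the rate chain
+this patch accounts for:
+
+  /* the selected parent is first divided by the mux pre-divider, then
+   * by the M divider and the power-of-two P divider */
+  unsigned long mp_rate(unsigned long parent, unsigned int prediv,
+                        unsigned int m, unsigned int p)
+  {
+          return (parent / prediv / m) >> p;
+  }
+
+Computing M and P against the raw parent rate while the hardware also
+applies the pre-divider leaves the result off by exactly that factor;
+the ccu_mux_helper_adjust_parent_for_prediv() calls added below account
+for it in .recalc_rate and .set_rate as well.
+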
+Fixes: 2ab836db5097 ("clk: sunxi-ng: Add M-P factor clock support")
+Signed-off-by: Chen-Yu Tsai <wens@csie.org>
+Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/sunxi-ng/ccu_mp.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/clk/sunxi-ng/ccu_mp.c
++++ b/drivers/clk/sunxi-ng/ccu_mp.c
+@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(
+ unsigned int m, p;
+ u32 reg;
+
++ /* Adjust parent_rate according to pre-dividers */
++ ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
++ -1, &parent_rate);
++
+ reg = readl(cmp->common.base + cmp->common.reg);
+
+ m = reg >> cmp->m.shift;
+@@ -114,6 +118,10 @@ static int ccu_mp_set_rate(struct clk_hw
+ unsigned int m, p;
+ u32 reg;
+
++ /* Adjust parent_rate according to pre-dividers */
++ ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
++ -1, &parent_rate);
++
+ max_m = cmp->m.max ?: 1 << cmp->m.width;
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+
--- /dev/null
+From 9ad0bb39fce319d7b92c17d306ed0a9f70a02e7d Mon Sep 17 00:00:00 2001
+From: Chen-Yu Tsai <wens@csie.org>
+Date: Tue, 14 Feb 2017 10:23:32 +0800
+Subject: clk: sunxi-ng: sun6i: Fix enable bit offset for hdmi-ddc module clock
+
+From: Chen-Yu Tsai <wens@csie.org>
+
+commit 9ad0bb39fce319d7b92c17d306ed0a9f70a02e7d upstream.
+
+The enable bit offset for the hdmi-ddc module clock is wrong. It is
+pointing to the main hdmi module clock enable bit.
+
+Reported-by: Bob Ham <rah@settrans.net>
+Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks")
+Signed-off-by: Chen-Yu Tsai <wens@csie.org>
+Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
++++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_cl
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
++static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+
+ static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
+
--- /dev/null
+From 07fef3623407444e51c12ea57cd91df38c1069e0 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 20 Mar 2017 09:58:33 +0100
+Subject: cpsw/netcp: cpts depends on posix_timers
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 07fef3623407444e51c12ea57cd91df38c1069e0 upstream.
+
+With posix timers having become optional, we get a build error with
+the cpts time sync option of the CPSW driver:
+
+drivers/net/ethernet/ti/cpts.c: In function 'cpts_find_ts':
+drivers/net/ethernet/ti/cpts.c:291:23: error: implicit declaration of function 'ptp_classify_raw';did you mean 'ptp_classifier_init'? [-Werror=implicit-function-declaration]
+
+This adds a hard dependency on PTP_1588_CLOCK to avoid the problem, as
+building it without PTP support makes no sense anyway.
+
+Fixes: baa73d9e478f ("posix-timers: Make them configurable")
+Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/ti/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -76,7 +76,7 @@ config TI_CPSW
+ config TI_CPTS
+ tristate "TI Common Platform Time Sync (CPTS) Support"
+ depends on TI_CPSW || TI_KEYSTONE_NETCP
+- imply PTP_1588_CLOCK
++ depends on PTP_1588_CLOCK
+ ---help---
+ This driver supports the Common Platform Time Sync unit of
+ the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
--- /dev/null
+From ff010472fb75670cb5c08671e820eeea3af59c87 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Tue, 21 Mar 2017 11:36:06 +0530
+Subject: cpufreq: Restore policy min/max limits on CPU online
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit ff010472fb75670cb5c08671e820eeea3af59c87 upstream.
+
+On CPU online the cpufreq core restores the previous governor (or
+the previous "policy" setting for ->setpolicy drivers), but it does
+not restore the min/max limits at the same time, which is confusing,
+inconsistent, and a real pain for users who set the limits and then
+suspend/resume the system (using full suspend), in which case the
+limits are reset on all CPUs except for the boot one.
+
+Fix this by making cpufreq_online() restore the limits when an inactive
+policy is brought online.
+
+The commit log and patch are inspired by Rafael's earlier work.
+
+Reported-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1190,6 +1190,9 @@ static int cpufreq_online(unsigned int c
+ for_each_cpu(j, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ } else {
++ policy->min = policy->user_policy.min;
++ policy->max = policy->user_policy.max;
+ }
+
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
--- /dev/null
+From 7c468447f40645fbf2a033dfdaa92b1957130d50 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <ghook@amd.com>
+Date: Fri, 10 Mar 2017 12:28:18 -0600
+Subject: crypto: ccp - Assign DMA commands to the channel's CCP
+
+From: Gary R Hook <ghook@amd.com>
+
+commit 7c468447f40645fbf2a033dfdaa92b1957130d50 upstream.
+
+The CCP driver generally uses a round-robin approach when
+assigning operations to available CCPs. For the DMA engine,
+however, the DMA mappings of the SGs are associated with a
+specific CCP. When an IOMMU is enabled, the IOMMU is
+programmed based on this specific device.
+
+If the DMA operations are not performed by that specific
+CCP then addressing errors and I/O page faults will occur.
+
+Update the CCP driver to allow a specific CCP device to be
+requested for an operation and use this in the DMA engine
+support.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev.c | 5 ++++-
+ drivers/crypto/ccp/ccp-dmaengine.c | 1 +
+ include/linux/ccp.h | 2 +-
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
+ */
+ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+ {
+- struct ccp_device *ccp = ccp_get_device();
++ struct ccp_device *ccp;
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
++ /* Some commands might need to be sent to a specific device */
++ ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
++
+ if (!ccp)
+ return -ENODEV;
+
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_d
+ goto err;
+
+ ccp_cmd = &cmd->ccp_cmd;
++ ccp_cmd->ccp = chan->ccp;
+ ccp_pt = &ccp_cmd->u.passthru_nomap;
+ ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
+ ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -556,7 +556,7 @@ enum ccp_engine {
+ * struct ccp_cmd - CCP operation request
+ * @entry: list element (ccp driver use only)
+ * @work: work element used for callbacks (ccp driver use only)
+- * @ccp: CCP device to be run on (ccp driver use only)
++ * @ccp: CCP device to be run on
+ * @ret: operation return code (ccp driver use only)
+ * @flags: cmd processing flags
+ * @engine: CCP operation to perform
--- /dev/null
+From 9a5476020a5f06a0fc6f17097efc80275d2f03cd Mon Sep 17 00:00:00 2001
+From: "K. Y. Srinivasan" <kys@microsoft.com>
+Date: Mon, 13 Mar 2017 15:57:09 -0700
+Subject: Drivers: hv: vmbus: Don't leak channel ids
+
+From: K. Y. Srinivasan <kys@microsoft.com>
+
+commit 9a5476020a5f06a0fc6f17097efc80275d2f03cd upstream.
+
+If we cannot allocate memory for the channel, free the relid
+associated with the channel.
+
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hv/channel_mgmt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -779,6 +779,7 @@ static void vmbus_onoffer(struct vmbus_c
+ /* Allocate the channel object and save this offer. */
+ newchannel = alloc_channel();
+ if (!newchannel) {
++ vmbus_release_relid(offer->child_relid);
+ pr_err("Unable to allocate channel object\n");
+ return;
+ }
--- /dev/null
+From 5e030d5ce9d99a899b648413139ff65bab12b038 Mon Sep 17 00:00:00 2001
+From: "K. Y. Srinivasan" <kys@microsoft.com>
+Date: Sun, 12 Mar 2017 20:00:30 -0700
+Subject: Drivers: hv: vmbus: Don't leak memory when a channel is rescinded
+
+From: K. Y. Srinivasan <kys@microsoft.com>
+
+commit 5e030d5ce9d99a899b648413139ff65bab12b038 upstream.
+
+When we close a channel that has been rescinded, we will leak memory since
+vmbus_teardown_gpadl() returns an error. Fix this so that we can properly
+clean up the memory allocated to the ring buffers.
+
+Fixes: ccb61f8a99e6 ("Drivers: hv: vmbus: Fix a rescind handling bug")
+
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hv/channel.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -506,12 +506,15 @@ int vmbus_teardown_gpadl(struct vmbus_ch
+
+ wait_for_completion(&info->waitevent);
+
+- if (channel->rescind) {
+- ret = -ENODEV;
+- goto post_msg_err;
+- }
+-
+ post_msg_err:
++ /*
++ * If the channel has been rescinded;
++ * we will be awakened by the rescind
++ * handler; set the error code to zero so we don't leak memory.
++ */
++ if (channel->rescind)
++ ret = 0;
++
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
--- /dev/null
+From cf8c73afb3abf0f8905efbaddd4ce11a0deec9da Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 17 Mar 2017 10:22:51 +0800
+Subject: drm/amd/amdgpu: add POLARIS12 PCI ID
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit cf8c73afb3abf0f8905efbaddd4ce11a0deec9da upstream.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -424,6 +424,7 @@ static const struct pci_device_id pciidl
+ {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
++ {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+
+ {0, 0, 0}
--- /dev/null
+From e11ddff68a7c455e63c4b46154a3e75c699a7b55 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 15 Mar 2017 21:13:25 -0400
+Subject: drm/amdgpu: reinstate oland workaround for sclk
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit e11ddff68a7c455e63c4b46154a3e75c699a7b55 upstream.
+
+Higher sclks seem to be unstable on some boards.
+
+bug: https://bugs.freedesktop.org/show_bug.cgi?id=100222
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -3498,9 +3498,13 @@ static void si_apply_state_adjust_rules(
+ max_sclk = 75000;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+- if ((adev->pdev->device == 0x6604) &&
+- (adev->pdev->subsystem_vendor == 0x1028) &&
+- (adev->pdev->subsystem_device == 0x066F)) {
++ if ((adev->pdev->revision == 0xC7) ||
++ (adev->pdev->revision == 0x80) ||
++ (adev->pdev->revision == 0x81) ||
++ (adev->pdev->revision == 0x83) ||
++ (adev->pdev->revision == 0x87) ||
++ (adev->pdev->device == 0x6604) ||
++ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ }
+ }
--- /dev/null
+From 1b53cf9815bb4744958d41f3795d5d5a1d365e2d Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 21 Feb 2017 15:07:11 -0800
+Subject: fscrypt: remove broken support for detecting keyring key revocation
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 1b53cf9815bb4744958d41f3795d5d5a1d365e2d upstream.
+
+Filesystem encryption ostensibly supported revoking a keyring key that
+had been used to "unlock" encrypted files, causing those files to become
+"locked" again. This was, however, buggy for several reasons, the most
+severe of which was that when key revocation happened to be detected for
+an inode, its fscrypt_info was immediately freed, even while other
+threads could be using it for encryption or decryption concurrently.
+This could be exploited to crash the kernel or worse.
+
+This patch fixes the use-after-free by removing the code which detects
+the keyring key having been revoked, invalidated, or expired. Instead,
+an encrypted inode that is "unlocked" now simply remains unlocked until
+it is evicted from memory. Note that this is no worse than the case for
+block device-level encryption, e.g. dm-crypt, and it still remains
+possible for a privileged user to evict unused pages, inodes, and
+dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
+simply unmounting the filesystem. In fact, one of those actions was
+already needed anyway for key revocation to work even somewhat sanely.
+This change is not expected to break any applications.
+
+In the future I'd like to implement a real API for fscrypt key
+revocation that interacts sanely with ongoing filesystem operations ---
+waiting for existing operations to complete and blocking new operations,
+and invalidating and sanitizing key material and plaintext from the VFS
+caches. But this is a hard problem, and for now this bug must be fixed.
+
+This bug affected almost all versions of ext4, f2fs, and ubifs
+encryption, and it was potentially reachable in any kernel configured
+with encryption support (CONFIG_EXT4_ENCRYPTION=y,
+CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
+CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
+shared fs/crypto/ code, but due to the potential security implications
+of this bug, it may still be worthwhile to backport this fix to them.
+
+Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Acked-by: Michael Halcrow <mhalcrow@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/crypto/crypto.c | 10 --------
+ fs/crypto/fname.c | 2 -
+ fs/crypto/fscrypt_private.h | 4 ---
+ fs/crypto/keyinfo.c | 52 +++++++-------------------------------------
+ 4 files changed, 11 insertions(+), 57 deletions(-)
+
+--- a/fs/crypto/crypto.c
++++ b/fs/crypto/crypto.c
+@@ -394,7 +394,6 @@ EXPORT_SYMBOL(fscrypt_zeroout_range);
+ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+ struct dentry *dir;
+- struct fscrypt_info *ci;
+ int dir_has_key, cached_with_key;
+
+ if (flags & LOOKUP_RCU)
+@@ -406,18 +405,11 @@ static int fscrypt_d_revalidate(struct d
+ return 0;
+ }
+
+- ci = d_inode(dir)->i_crypt_info;
+- if (ci && ci->ci_keyring_key &&
+- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+- (1 << KEY_FLAG_REVOKED) |
+- (1 << KEY_FLAG_DEAD))))
+- ci = NULL;
+-
+ /* this should eventually be an flag in d_flags */
+ spin_lock(&dentry->d_lock);
+ cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+- dir_has_key = (ci != NULL);
++ dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
+ dput(dir);
+
+ /*
+--- a/fs/crypto/fname.c
++++ b/fs/crypto/fname.c
+@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode
+ fname->disk_name.len = iname->len;
+ return 0;
+ }
+- ret = fscrypt_get_crypt_info(dir);
++ ret = fscrypt_get_encryption_info(dir);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+--- a/fs/crypto/fscrypt_private.h
++++ b/fs/crypto/fscrypt_private.h
+@@ -67,7 +67,6 @@ struct fscrypt_info {
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+- struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+ };
+
+@@ -87,7 +86,4 @@ struct fscrypt_completion_result {
+ /* crypto.c */
+ int fscrypt_initialize(unsigned int cop_flags);
+
+-/* keyinfo.c */
+-extern int fscrypt_get_crypt_info(struct inode *);
+-
+ #endif /* _FSCRYPT_PRIVATE_H */
+--- a/fs/crypto/keyinfo.c
++++ b/fs/crypto/keyinfo.c
+@@ -99,6 +99,7 @@ static int validate_user_key(struct fscr
+ kfree(full_key_descriptor);
+ if (IS_ERR(keyring_key))
+ return PTR_ERR(keyring_key);
++ down_read(&keyring_key->sem);
+
+ if (keyring_key->type != &key_type_logon) {
+ printk_once(KERN_WARNING
+@@ -106,11 +107,9 @@ static int validate_user_key(struct fscr
+ res = -ENOKEY;
+ goto out;
+ }
+- down_read(&keyring_key->sem);
+ ukp = user_key_payload(keyring_key);
+ if (ukp->datalen != sizeof(struct fscrypt_key)) {
+ res = -EINVAL;
+- up_read(&keyring_key->sem);
+ goto out;
+ }
+ master_key = (struct fscrypt_key *)ukp->data;
+@@ -121,17 +120,11 @@ static int validate_user_key(struct fscr
+ "%s: key size incorrect: %d\n",
+ __func__, master_key->size);
+ res = -ENOKEY;
+- up_read(&keyring_key->sem);
+ goto out;
+ }
+ res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+- up_read(&keyring_key->sem);
+- if (res)
+- goto out;
+-
+- crypt_info->ci_keyring_key = keyring_key;
+- return 0;
+ out:
++ up_read(&keyring_key->sem);
+ key_put(keyring_key);
+ return res;
+ }
+@@ -173,12 +166,11 @@ static void put_crypt_info(struct fscryp
+ if (!ci)
+ return;
+
+- key_put(ci->ci_keyring_key);
+ crypto_free_skcipher(ci->ci_ctfm);
+ kmem_cache_free(fscrypt_info_cachep, ci);
+ }
+
+-int fscrypt_get_crypt_info(struct inode *inode)
++int fscrypt_get_encryption_info(struct inode *inode)
+ {
+ struct fscrypt_info *crypt_info;
+ struct fscrypt_context ctx;
+@@ -188,21 +180,15 @@ int fscrypt_get_crypt_info(struct inode
+ u8 *raw_key = NULL;
+ int res;
+
++ if (inode->i_crypt_info)
++ return 0;
++
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+ if (res)
+ return res;
+
+ if (!inode->i_sb->s_cop->get_context)
+ return -EOPNOTSUPP;
+-retry:
+- crypt_info = ACCESS_ONCE(inode->i_crypt_info);
+- if (crypt_info) {
+- if (!crypt_info->ci_keyring_key ||
+- key_validate(crypt_info->ci_keyring_key) == 0)
+- return 0;
+- fscrypt_put_encryption_info(inode, crypt_info);
+- goto retry;
+- }
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0) {
+@@ -230,7 +216,6 @@ retry:
+ crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+ crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+ crypt_info->ci_ctfm = NULL;
+- crypt_info->ci_keyring_key = NULL;
+ memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+ sizeof(crypt_info->ci_master_key));
+
+@@ -286,14 +271,8 @@ got_key:
+ if (res)
+ goto out;
+
+- kzfree(raw_key);
+- raw_key = NULL;
+- if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
+- put_crypt_info(crypt_info);
+- goto retry;
+- }
+- return 0;
+-
++ if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
++ crypt_info = NULL;
+ out:
+ if (res == -ENOKEY)
+ res = 0;
+@@ -301,6 +280,7 @@ out:
+ kzfree(raw_key);
+ return res;
+ }
++EXPORT_SYMBOL(fscrypt_get_encryption_info);
+
+ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+ {
+@@ -318,17 +298,3 @@ void fscrypt_put_encryption_info(struct
+ put_crypt_info(ci);
+ }
+ EXPORT_SYMBOL(fscrypt_put_encryption_info);
+-
+-int fscrypt_get_encryption_info(struct inode *inode)
+-{
+- struct fscrypt_info *ci = inode->i_crypt_info;
+-
+- if (!ci ||
+- (ci->ci_keyring_key &&
+- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+- (1 << KEY_FLAG_REVOKED) |
+- (1 << KEY_FLAG_DEAD)))))
+- return fscrypt_get_crypt_info(inode);
+- return 0;
+-}
+-EXPORT_SYMBOL(fscrypt_get_encryption_info);
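The cmpxchg() near the end of the rewritten fscrypt_get_encryption_info() is
the usual lock-free "install once" idiom: the first thread to publish its
fscrypt_info wins, and any thread that loses the race simply frees its own
copy. Stripped of the fscrypt specifics it looks roughly like this (struct foo
and free_foo() are placeholder names):

    /* Publish 'new' into *slot unless another thread already installed one. */
    static void install_once(struct foo **slot, struct foo *new)
    {
            if (cmpxchg(slot, NULL, new) != NULL)
                    free_foo(new);  /* lost the race; the winner's copy stays live */
            /* on success, 'new' is now owned by *slot and must not be freed here */
    }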
--- /dev/null
+From 69db7009318758769d625b023402161c750f7876 Mon Sep 17 00:00:00 2001
+From: Prarit Bhargava <prarit@redhat.com>
+Date: Tue, 14 Mar 2017 07:36:01 -0400
+Subject: hwrng: amd - Revert managed API changes
+
+From: Prarit Bhargava <prarit@redhat.com>
+
+commit 69db7009318758769d625b023402161c750f7876 upstream.
+
+After commit 31b2a73c9c5f ("hwrng: amd - Migrate to managed API"), the
+amd-rng driver uses devres with pci_dev->dev to keep track of resources,
+but does not actually register a PCI driver. This results in the
+following issues:
+
+1. The message
+
+WARNING: CPU: 2 PID: 621 at drivers/base/dd.c:349 driver_probe_device+0x38c
+
+is output when the i2c_amd756 driver loads and attempts to register a PCI
+driver. The PCI & device subsystems assume that no resources have been
+registered for the device, and the WARN_ON() triggers since amd-rng has
+already done so.
+
+2. The driver leaks memory because the driver does not attach to a
+device. The driver only uses the PCI device as a reference. devm_*()
+functions will release resources on driver detach, which the amd-rng
+driver will never do. As a result,
+
+3. The driver cannot be reloaded because there is always a use of the
+ioport and region after the first load of the driver.
+
+Revert the changes made by 31b2a73c9c5f ("hwrng: amd - Migrate to managed
+API").
+
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Fixes: 31b2a73c9c5f ("hwrng: amd - Migrate to managed API").
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Corentin LABBE <clabbe.montjoie@gmail.com>
+Cc: PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+Cc: Wei Yongjun <weiyongjun1@huawei.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-geode@lists.infradead.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/hw_random/amd-rng.c | 42 +++++++++++++++++++++++++++++++--------
+ 1 file changed, 34 insertions(+), 8 deletions(-)
+
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
+ struct amd768_priv {
+ void __iomem *iobase;
+ struct pci_dev *pcidev;
++ u32 pmbase;
+ };
+
+ static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+@@ -148,33 +149,58 @@ found:
+ if (pmbase == 0)
+ return -EIO;
+
+- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+- if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
+- PMBASE_SIZE, DRV_NAME)) {
++ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+ pmbase + 0xF0);
+- return -EBUSY;
++ err = -EBUSY;
++ goto out;
+ }
+
+- priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
+- PMBASE_SIZE);
++ priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ if (!priv->iobase) {
+ pr_err(DRV_NAME "Cannot map ioport\n");
+- return -ENOMEM;
++ err = -EINVAL;
++ goto err_iomap;
+ }
+
+ amd_rng.priv = (unsigned long)priv;
++ priv->pmbase = pmbase;
+ priv->pcidev = pdev;
+
+ pr_info(DRV_NAME " detected\n");
+- return devm_hwrng_register(&pdev->dev, &amd_rng);
++ err = hwrng_register(&amd_rng);
++ if (err) {
++ pr_err(DRV_NAME " registering failed (%d)\n", err);
++ goto err_hwrng;
++ }
++ return 0;
++
++err_hwrng:
++ ioport_unmap(priv->iobase);
++err_iomap:
++ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
++out:
++ kfree(priv);
++ return err;
+ }
+
+ static void __exit mod_exit(void)
+ {
++ struct amd768_priv *priv;
++
++ priv = (struct amd768_priv *)amd_rng.priv;
++
++ hwrng_unregister(&amd_rng);
++
++ ioport_unmap(priv->iobase);
++
++ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
++
++ kfree(priv);
+ }
+
+ module_init(mod_init);
--- /dev/null
+From 8c75704ebcac2ffa31ee7bcc359baf701b52bf00 Mon Sep 17 00:00:00 2001
+From: Prarit Bhargava <prarit@redhat.com>
+Date: Tue, 14 Mar 2017 07:36:02 -0400
+Subject: hwrng: geode - Revert managed API changes
+
+From: Prarit Bhargava <prarit@redhat.com>
+
+commit 8c75704ebcac2ffa31ee7bcc359baf701b52bf00 upstream.
+
+After commit e9afc746299d ("hwrng: geode - Use linux/io.h instead of
+asm/io.h") the geode-rng driver uses devres with pci_dev->dev to keep
+track of resources, but does not actually register a PCI driver. This
+results in the following issues:
+
+1. The driver leaks memory because the driver does not attach to a
+device. The driver only uses the PCI device as a reference. devm_*()
+functions will release resources on driver detach, which the geode-rng
+driver will never do. As a result,
+
+2. The driver cannot be reloaded because there is always a use of the
+ioport and region after the first load of the driver.
+
+Revert the changes made by e9afc746299d ("hwrng: geode - Use linux/io.h
+instead of asm/io.h").
+
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Fixes: 6e9b5e76882c ("hwrng: geode - Migrate to managed API")
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Corentin LABBE <clabbe.montjoie@gmail.com>
+Cc: PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+Cc: Wei Yongjun <weiyongjun1@huawei.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-geode@lists.infradead.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/hw_random/geode-rng.c | 50 +++++++++++++++++++++++++------------
+ 1 file changed, 35 insertions(+), 15 deletions(-)
+
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -31,6 +31,9 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+
++
++#define PFX KBUILD_MODNAME ": "
++
+ #define GEODE_RNG_DATA_REG 0x50
+ #define GEODE_RNG_STATUS_REG 0x54
+
+@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
+
+ static int __init mod_init(void)
+ {
++ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+@@ -89,27 +93,43 @@ static int __init mod_init(void)
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+- if (ent) {
+- rng_base = pci_resource_start(pdev, 0);
+- if (rng_base == 0)
+- return -ENODEV;
+-
+- mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
+- if (!mem)
+- return -ENOMEM;
+- geode_rng.priv = (unsigned long)mem;
+-
+- pr_info("AMD Geode RNG detected\n");
+- return devm_hwrng_register(&pdev->dev, &geode_rng);
+- }
++ if (ent)
++ goto found;
+ }
+-
+ /* Device not found. */
+- return -ENODEV;
++ goto out;
++
++found:
++ rng_base = pci_resource_start(pdev, 0);
++ if (rng_base == 0)
++ goto out;
++ err = -ENOMEM;
++ mem = ioremap(rng_base, 0x58);
++ if (!mem)
++ goto out;
++ geode_rng.priv = (unsigned long)mem;
++
++ pr_info("AMD Geode RNG detected\n");
++ err = hwrng_register(&geode_rng);
++ if (err) {
++ pr_err(PFX "RNG registering failed (%d)\n",
++ err);
++ goto err_unmap;
++ }
++out:
++ return err;
++
++err_unmap:
++ iounmap(mem);
++ goto out;
+ }
+
+ static void __exit mod_exit(void)
+ {
++ void __iomem *mem = (void __iomem *)geode_rng.priv;
++
++ hwrng_unregister(&geode_rng);
++ iounmap(mem);
+ }
+
+ module_init(mod_init);
--- /dev/null
+From e609ccef5222c73b46b322be7d3796d60bff353d Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Fri, 24 Feb 2017 16:04:15 +0200
+Subject: intel_th: Don't leak module refcount on failure to activate
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit e609ccef5222c73b46b322be7d3796d60bff353d upstream.
+
+Output 'activation' may fail for reasons internal to the output driver,
+for example, if msc's buffer is not allocated. We forget, however,
+to drop the module reference in this case. So each attempt at
+activation in this case leaks a reference, preventing the module
+from ever unloading.
+
+This patch adds the missing module_put() in the activation error
+path.
+
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwtracing/intel_th/core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/hwtracing/intel_th/core.c
++++ b/drivers/hwtracing/intel_th/core.c
+@@ -221,8 +221,10 @@ static int intel_th_output_activate(stru
+ else
+ intel_th_trace_enable(thdev);
+
+- if (ret)
++ if (ret) {
+ pm_runtime_put(&thdev->dev);
++ module_put(thdrv->driver.owner);
++ }
+
+ return ret;
+ }
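The rule this restores is simply that every reference taken before the failing
step has to be dropped on that step's error path. In outline, with
do_activate() standing in for the driver-specific activation call:

    if (!try_module_get(drv->driver.owner))
            return -ENODEV;

    ret = do_activate(dev);                 /* may fail, e.g. buffer not allocated */
    if (ret)
            module_put(drv->driver.owner);  /* balance the reference taken above */

    return ret;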
--- /dev/null
+From 7d2aa6b814476a2e2794960f844344519246df72 Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Mon, 20 Mar 2017 10:17:56 +0100
+Subject: iommu/exynos: Block SYSMMU while invalidating FLPD cache
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit 7d2aa6b814476a2e2794960f844344519246df72 upstream.
+
+Documentation specifies that SYSMMU should be in blocked state while
+performing TLB/FLPD cache invalidation, so add the needed calls to
+sysmmu_block/unblock.
+
+Fixes: 66a7ed84b345d ("iommu/exynos: Apply workaround of caching fault page table entries")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/exynos-iommu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -509,7 +509,10 @@ static void sysmmu_tlb_invalidate_flpdca
+ spin_lock_irqsave(&data->lock, flags);
+ if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
+ clk_enable(data->clk_master);
+- __sysmmu_tlb_invalidate_entry(data, iova, 1);
++ if (sysmmu_block(data)) {
++ __sysmmu_tlb_invalidate_entry(data, iova, 1);
++ sysmmu_unblock(data);
++ }
+ clk_disable(data->clk_master);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
--- /dev/null
+From cd37a296a9f890586665bb8974a8b17ee2f17d6d Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Mon, 20 Mar 2017 10:17:57 +0100
+Subject: iommu/exynos: Workaround FLPD cache flush issues for SYSMMU v5
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit cd37a296a9f890586665bb8974a8b17ee2f17d6d upstream.
+
+For unknown reasons, in some cases, FLPD cache invalidation doesn't
+work properly with SYSMMU v5 controllers found in Exynos5433 SoCs. This
+can be observed as a firmware crash during the initialization phase of the
+MFC video decoder available in the mentioned SoCs when IOMMU support is
+enabled. To work around this issue, perform a full TLB/FLPD invalidation
+whenever any first-level page descriptor is replaced on SYSMMU v5.
+
+Fixes: 740a01eee9ada ("iommu/exynos: Add support for v5 SYSMMU")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Andrzej Hajda <a.hajda@samsung.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/exynos-iommu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -510,7 +510,10 @@ static void sysmmu_tlb_invalidate_flpdca
+ if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
+ clk_enable(data->clk_master);
+ if (sysmmu_block(data)) {
+- __sysmmu_tlb_invalidate_entry(data, iova, 1);
++ if (data->version >= MAKE_MMU_VER(5, 0))
++ __sysmmu_tlb_invalidate(data);
++ else
++ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ sysmmu_unblock(data);
+ }
+ clk_disable(data->clk_master);
--- /dev/null
+From 5003ae1e735e6bfe4679d9bed6846274f322e77e Mon Sep 17 00:00:00 2001
+From: Koos Vriezen <koos.vriezen@gmail.com>
+Date: Wed, 1 Mar 2017 21:02:50 +0100
+Subject: iommu/vt-d: Fix NULL pointer dereference in device_to_iommu
+
+From: Koos Vriezen <koos.vriezen@gmail.com>
+
+commit 5003ae1e735e6bfe4679d9bed6846274f322e77e upstream.
+
+The function device_to_iommu() in the Intel VT-d driver
+lacks a NULL-ptr check, resulting in this oops at boot on
+some platforms:
+
+ BUG: unable to handle kernel NULL pointer dereference at 00000000000007ab
+ IP: [<ffffffff8132234a>] device_to_iommu+0x11a/0x1a0
+ PGD 0
+
+ [...]
+
+ Call Trace:
+ ? find_or_alloc_domain.constprop.29+0x1a/0x300
+ ? dw_dma_probe+0x561/0x580 [dw_dmac_core]
+ ? __get_valid_domain_for_dev+0x39/0x120
+ ? __intel_map_single+0x138/0x180
+ ? intel_alloc_coherent+0xb6/0x120
+ ? sst_hsw_dsp_init+0x173/0x420 [snd_soc_sst_haswell_pcm]
+ ? mutex_lock+0x9/0x30
+ ? kernfs_add_one+0xdb/0x130
+ ? devres_add+0x19/0x60
+ ? hsw_pcm_dev_probe+0x46/0xd0 [snd_soc_sst_haswell_pcm]
+ ? platform_drv_probe+0x30/0x90
+ ? driver_probe_device+0x1ed/0x2b0
+ ? __driver_attach+0x8f/0xa0
+ ? driver_probe_device+0x2b0/0x2b0
+ ? bus_for_each_dev+0x55/0x90
+ ? bus_add_driver+0x110/0x210
+ ? 0xffffffffa11ea000
+ ? driver_register+0x52/0xc0
+ ? 0xffffffffa11ea000
+ ? do_one_initcall+0x32/0x130
+ ? free_vmap_area_noflush+0x37/0x70
+ ? kmem_cache_alloc+0x88/0xd0
+ ? do_init_module+0x51/0x1c4
+ ? load_module+0x1ee9/0x2430
+ ? show_taint+0x20/0x20
+ ? kernel_read_file+0xfd/0x190
+ ? SyS_finit_module+0xa3/0xb0
+ ? do_syscall_64+0x4a/0xb0
+ ? entry_SYSCALL64_slow_path+0x25/0x25
+ Code: 78 ff ff ff 4d 85 c0 74 ee 49 8b 5a 10 0f b6 9b e0 00 00 00 41 38 98 e0 00 00 00 77 da 0f b6 eb 49 39 a8 88 00 00 00 72 ce eb 8f <41> f6 82 ab 07 00 00 04 0f 85 76 ff ff ff 0f b6 4d 08 88 0e 49
+ RIP [<ffffffff8132234a>] device_to_iommu+0x11a/0x1a0
+ RSP <ffffc90001457a78>
+ CR2: 00000000000007ab
+ ---[ end trace 16f974b6d58d0aad ]---
+
+Add the missing pointer check.
+
+Fixes: 1c387188c60f53b338c20eee32db055dfe022a9b ("iommu/vt-d: Fix IOMMU lookup for SR-IOV Virtual Functions")
+Signed-off-by: Koos Vriezen <koos.vriezen@gmail.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -915,7 +915,7 @@ static struct intel_iommu *device_to_iom
+ * which we used for the IOMMU lookup. Strictly speaking
+ * we could do this for all PCI devices; we only need to
+ * get the BDF# from the scope table for ACPI matches. */
+- if (pdev->is_virtfn)
++ if (pdev && pdev->is_virtfn)
+ goto got_pdev;
+
+ *bus = drhd->devices[i].bus;
--- /dev/null
+From cd9cb405e0b948363811dc74dbb2890f56f2cb87 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 15 Mar 2017 15:08:48 -0400
+Subject: jbd2: don't leak memory if setting up journal fails
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit cd9cb405e0b948363811dc74dbb2890f56f2cb87 upstream.
+
+In journal_init_common(), if we failed to allocate the j_wbuf array, or
+if we failed to create the buffer_head for the journal superblock, we
+leaked the memory allocated for the revocation tables. Fix this.
+
+Fixes: f0c9fd5458bacf7b12a9a579a727dc740cbe047e
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/journal.c | 22 +++++++++++-----------
+ fs/jbd2/revoke.c | 1 +
+ 2 files changed, 12 insertions(+), 11 deletions(-)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(st
+
+ /* Set up a default-sized revoke table for the new mount. */
+ err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
+- if (err) {
+- kfree(journal);
+- return NULL;
+- }
++ if (err)
++ goto err_cleanup;
+
+ spin_lock_init(&journal->j_history_lock);
+
+@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(st
+ journal->j_wbufsize = n;
+ journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
+ GFP_KERNEL);
+- if (!journal->j_wbuf) {
+- kfree(journal);
+- return NULL;
+- }
++ if (!journal->j_wbuf)
++ goto err_cleanup;
+
+ bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
+ if (!bh) {
+ pr_err("%s: Cannot get buffer for journal superblock\n",
+ __func__);
+- kfree(journal->j_wbuf);
+- kfree(journal);
+- return NULL;
++ goto err_cleanup;
+ }
+ journal->j_sb_buffer = bh;
+ journal->j_superblock = (journal_superblock_t *)bh->b_data;
+
+ return journal;
++
++err_cleanup:
++ kfree(journal->j_wbuf);
++ jbd2_journal_destroy_revoke(journal);
++ kfree(journal);
++ return NULL;
+ }
+
+ /* jbd2_journal_init_dev and jbd2_journal_init_inode:
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *
+
+ fail1:
+ jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
++ journal->j_revoke_table[0] = NULL;
+ fail0:
+ return -ENOMEM;
+ }
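Both hunks follow the common kernel error-unwinding idiom: one cleanup label
frees everything allocated so far, which only works if every helper leaves its
pointer NULL on failure — hence the new "journal->j_revoke_table[0] = NULL" in
the revoke code. A condensed illustration with placeholder names (struct ctx,
table_create(), table_destroy()):

    static struct ctx *ctx_alloc(void)
    {
            struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

            if (!c)
                    return NULL;

            c->buf = kmalloc(BUF_SIZE, GFP_KERNEL);
            if (!c->buf)
                    goto err_cleanup;

            c->table = table_create();      /* must leave c->table NULL on failure */
            if (!c->table)
                    goto err_cleanup;

            return c;

    err_cleanup:
            table_destroy(c->table);        /* like kfree(), must tolerate NULL */
            kfree(c->buf);                  /* kfree(NULL) is a no-op */
            kfree(c);
            return NULL;
    }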
--- /dev/null
+From 633ee407b9d15a75ac9740ba9d3338815e1fcb95 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Tue, 21 Mar 2017 13:44:28 +0100
+Subject: libceph: force GFP_NOIO for socket allocations
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 633ee407b9d15a75ac9740ba9d3338815e1fcb95 upstream.
+
+sock_alloc_inode() allocates socket+inode and socket_wq with
+GFP_KERNEL, which is not allowed on the writeback path:
+
+ Workqueue: ceph-msgr con_work [libceph]
+ ffff8810871cb018 0000000000000046 0000000000000000 ffff881085d40000
+ 0000000000012b00 ffff881025cad428 ffff8810871cbfd8 0000000000012b00
+ ffff880102fc1000 ffff881085d40000 ffff8810871cb038 ffff8810871cb148
+ Call Trace:
+ [<ffffffff816dd629>] schedule+0x29/0x70
+ [<ffffffff816e066d>] schedule_timeout+0x1bd/0x200
+ [<ffffffff81093ffc>] ? ttwu_do_wakeup+0x2c/0x120
+ [<ffffffff81094266>] ? ttwu_do_activate.constprop.135+0x66/0x70
+ [<ffffffff816deb5f>] wait_for_completion+0xbf/0x180
+ [<ffffffff81097cd0>] ? try_to_wake_up+0x390/0x390
+ [<ffffffff81086335>] flush_work+0x165/0x250
+ [<ffffffff81082940>] ? worker_detach_from_pool+0xd0/0xd0
+ [<ffffffffa03b65b1>] xlog_cil_force_lsn+0x81/0x200 [xfs]
+ [<ffffffff816d6b42>] ? __slab_free+0xee/0x234
+ [<ffffffffa03b4b1d>] _xfs_log_force_lsn+0x4d/0x2c0 [xfs]
+ [<ffffffff811adc1e>] ? lookup_page_cgroup_used+0xe/0x30
+ [<ffffffffa039a723>] ? xfs_reclaim_inode+0xa3/0x330 [xfs]
+ [<ffffffffa03b4dcf>] xfs_log_force_lsn+0x3f/0xf0 [xfs]
+ [<ffffffffa039a723>] ? xfs_reclaim_inode+0xa3/0x330 [xfs]
+ [<ffffffffa03a62c6>] xfs_iunpin_wait+0xc6/0x1a0 [xfs]
+ [<ffffffff810aa250>] ? wake_atomic_t_function+0x40/0x40
+ [<ffffffffa039a723>] xfs_reclaim_inode+0xa3/0x330 [xfs]
+ [<ffffffffa039ac07>] xfs_reclaim_inodes_ag+0x257/0x3d0 [xfs]
+ [<ffffffffa039bb13>] xfs_reclaim_inodes_nr+0x33/0x40 [xfs]
+ [<ffffffffa03ab745>] xfs_fs_free_cached_objects+0x15/0x20 [xfs]
+ [<ffffffff811c0c18>] super_cache_scan+0x178/0x180
+ [<ffffffff8115912e>] shrink_slab_node+0x14e/0x340
+ [<ffffffff811afc3b>] ? mem_cgroup_iter+0x16b/0x450
+ [<ffffffff8115af70>] shrink_slab+0x100/0x140
+ [<ffffffff8115e425>] do_try_to_free_pages+0x335/0x490
+ [<ffffffff8115e7f9>] try_to_free_pages+0xb9/0x1f0
+ [<ffffffff816d56e4>] ? __alloc_pages_direct_compact+0x69/0x1be
+ [<ffffffff81150cba>] __alloc_pages_nodemask+0x69a/0xb40
+ [<ffffffff8119743e>] alloc_pages_current+0x9e/0x110
+ [<ffffffff811a0ac5>] new_slab+0x2c5/0x390
+ [<ffffffff816d71c4>] __slab_alloc+0x33b/0x459
+ [<ffffffff815b906d>] ? sock_alloc_inode+0x2d/0xd0
+ [<ffffffff8164bda1>] ? inet_sendmsg+0x71/0xc0
+ [<ffffffff815b906d>] ? sock_alloc_inode+0x2d/0xd0
+ [<ffffffff811a21f2>] kmem_cache_alloc+0x1a2/0x1b0
+ [<ffffffff815b906d>] sock_alloc_inode+0x2d/0xd0
+ [<ffffffff811d8566>] alloc_inode+0x26/0xa0
+ [<ffffffff811da04a>] new_inode_pseudo+0x1a/0x70
+ [<ffffffff815b933e>] sock_alloc+0x1e/0x80
+ [<ffffffff815ba855>] __sock_create+0x95/0x220
+ [<ffffffff815baa04>] sock_create_kern+0x24/0x30
+ [<ffffffffa04794d9>] con_work+0xef9/0x2050 [libceph]
+ [<ffffffffa04aa9ec>] ? rbd_img_request_submit+0x4c/0x60 [rbd]
+ [<ffffffff81084c19>] process_one_work+0x159/0x4f0
+ [<ffffffff8108561b>] worker_thread+0x11b/0x530
+ [<ffffffff81085500>] ? create_worker+0x1d0/0x1d0
+ [<ffffffff8108b6f9>] kthread+0xc9/0xe0
+ [<ffffffff8108b630>] ? flush_kthread_worker+0x90/0x90
+ [<ffffffff816e1b98>] ret_from_fork+0x58/0x90
+ [<ffffffff8108b630>] ? flush_kthread_worker+0x90/0x90
+
+Use memalloc_noio_{save,restore}() to temporarily force GFP_NOIO here.
+
+Link: http://tracker.ceph.com/issues/19309
+Reported-by: Sergey Jerusalimov <wintchester@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ceph/messenger.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -7,6 +7,7 @@
+ #include <linux/kthread.h>
+ #include <linux/net.h>
+ #include <linux/nsproxy.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/socket.h>
+ #include <linux/string.h>
+@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_
+ {
+ struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
+ struct socket *sock;
++ unsigned int noio_flag;
+ int ret;
+
+ BUG_ON(con->sock);
++
++ /* sock_create_kern() allocates with GFP_KERNEL */
++ noio_flag = memalloc_noio_save();
+ ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
++ memalloc_noio_restore(noio_flag);
+ if (ret)
+ return ret;
+ sock->sk->sk_allocation = GFP_NOFS;
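memalloc_noio_save() sets PF_MEMALLOC_NOIO on the current task, so every
allocation made before the matching restore — including GFP_KERNEL ones buried
in callees such as sock_create_kern() — is implicitly degraded to GFP_NOIO and
cannot recurse into I/O for reclaim. The general shape of the idiom, with
do_allocating_call() as a placeholder:

    unsigned int noio_flag;
    int ret;

    noio_flag = memalloc_noio_save();
    /* allocations below act as GFP_NOIO even if callees request GFP_KERNEL */
    ret = do_allocating_call();
    memalloc_noio_restore(noio_flag);       /* always restore the saved state */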
--- /dev/null
+From e2ebfb2142acefecc2496e71360f50d25726040b Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Mon, 20 Mar 2017 19:50:29 +0200
+Subject: mmc: sdhci: Do not disable interrupts while waiting for clock
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit e2ebfb2142acefecc2496e71360f50d25726040b upstream.
+
+Disabling interrupts for even a millisecond can cause problems for some
+devices. That can happen when sdhci changes clock frequency because it
+waits for the clock to become stable under a spin lock.
+
+The spin lock is not necessary here. Anything that is racing with changes
+to the I/O state is already broken. The mmc core already provides
+synchronization via "claiming" the host.
+
+Although the spin lock probably should be removed from the code paths that
+lead to this point, such a patch would touch too much code to be suitable
+for stable trees. Consequently, for this patch, just drop the spin lock
+while waiting.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Tested-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host
+ return;
+ }
+ timeout--;
+- mdelay(1);
++ spin_unlock_irq(&host->lock);
++ usleep_range(900, 1100);
++ spin_lock_irq(&host->lock);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
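This and the sdhci-pci patch later in this queue rely on the same pattern:
drop the spinlock around the sleep and retake it before looking at the
hardware again, which is only safe here because the mmc core has already
"claimed" the host and nothing the code depends on can change while the lock
is released. Schematically, with condition_met() and the timeout handling as
illustrative placeholders:

    spin_lock_irq(&host->lock);
    while (!condition_met(host)) {
            if (!timeout--) {
                    /* handle the timeout while still holding the lock */
                    break;
            }
            spin_unlock_irq(&host->lock);
            usleep_range(900, 1100);        /* sleeping with a spinlock held is a bug */
            spin_lock_irq(&host->lock);
    }
    /* ... continue under the lock ... */
    spin_unlock_irq(&host->lock);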
--- /dev/null
+From 16681037e75ce08f2980ac5dbb03414429c7a55d Mon Sep 17 00:00:00 2001
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+Date: Mon, 13 Feb 2017 14:06:10 +0200
+Subject: mmc: sdhci-of-arasan: fix incorrect timeout clock
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+commit 16681037e75ce08f2980ac5dbb03414429c7a55d upstream.
+
+sdhci_arasan_get_timeout_clock() divides the frequency it has by (1 <<
+(13 + divisor)).
+
+However, the divisor is not some Arasan-specific value, but instead is
+just the Data Timeout Counter Value from the SDHCI Timeout Control
+Register.
+
+Applying it here like this is wrong as the sdhci driver already takes
+that value into account when calculating timeouts, and in fact it *sets*
+that register value based on how long a timeout is wanted.
+
+Additionally, sdhci core interprets the .get_timeout_clock callback
+return value as if it were read from hardware registers, i.e. the unit
+should be kHz or MHz depending on SDHCI_TIMEOUT_CLK_UNIT capability bit.
+This bit is set at least on the tested Zynq-7000 SoC.
+
+With the tested hardware (SDHCI_TIMEOUT_CLK_UNIT set) this results in
+too high a timeout clock rate being reported, causing the core to use
+longer-than-needed timeouts. Additionally, on a partitioned MMC
+(therefore having erase_group_def bit set) mmc_calc_max_discard()
+disables discard support as it looks like the controller does not support
+the long timeouts needed for that.
+
+Do not apply the extra divisor and return the timeout clock in the
+expected unit.
+
+Tested with a Zynq-7000 SoC and a partitioned Toshiba THGBMAG5A1JBAWR
+eMMC card.
+
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: e3ec3a3d11ad ("mmc: arasan: Add driver for Arasan SDHCI")
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-arasan.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -28,13 +28,9 @@
+ #include "sdhci-pltfm.h"
+ #include <linux/of.h>
+
+-#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+ #define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+
+ #define VENDOR_ENHANCED_STROBE BIT(0)
+-#define CLK_CTRL_TIMEOUT_SHIFT 16
+-#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+-#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+
+ #define PHY_CLK_TOO_SLOW_HZ 400000
+
+@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(str
+
+ static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
+ {
+- u32 div;
+ unsigned long freq;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+- div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
+- div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
++ /* SDHCI timeout clock is in kHz */
++ freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
+
+- freq = clk_get_rate(pltfm_host->clk);
+- freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
++ /* or in MHz */
++ if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
++ freq = DIV_ROUND_UP(freq, 1000);
+
+ return freq;
+ }
--- /dev/null
+From 2ce0c7b65505e0d915e99389cced45b478dc935d Mon Sep 17 00:00:00 2001
+From: Romain Izard <romain.izard.pro@gmail.com>
+Date: Thu, 9 Mar 2017 16:18:20 +0100
+Subject: mmc: sdhci-of-at91: Support external regulators
+
+From: Romain Izard <romain.izard.pro@gmail.com>
+
+commit 2ce0c7b65505e0d915e99389cced45b478dc935d upstream.
+
+The SDHCI controller in the SAMA5D2 chip requires a valid voltage set
+in the power control register, otherwise commands will fail with a
+timeout error.
+
+When using the regulator framework to specify the regulator used by the
+mmc device, the voltage is not configured, and it is not possible to use
+the connected device.
+
+Implement a custom 'set_power' function for this specific hardware, that
+configures the voltage in the register in all cases.
+
+Signed-off-by: Romain Izard <romain.izard.pro@gmail.com>
+Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-at91.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -85,11 +85,30 @@ static void sdhci_at91_set_clock(struct
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+
++/*
++ * In this specific implementation of the SDHCI controller, the power register
++ * needs to have a valid voltage set even when the power supply is managed by
++ * an external regulator.
++ */
++static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ if (!IS_ERR(host->mmc->supply.vmmc)) {
++ struct mmc_host *mmc = host->mmc;
++
++ spin_unlock_irq(&host->lock);
++ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++ spin_lock_irq(&host->lock);
++ }
++ sdhci_set_power_noreg(host, mode, vdd);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+ .set_clock = sdhci_at91_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_power = sdhci_at91_set_power,
+ };
+
+ static const struct sdhci_pltfm_data soc_data_sama5d2 = {
--- /dev/null
+From 027fb89e61054b4aedd962adb3e2003dec78a716 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Mon, 20 Mar 2017 19:50:30 +0200
+Subject: mmc: sdhci-pci: Do not disable interrupts in sdhci_intel_set_power
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 027fb89e61054b4aedd962adb3e2003dec78a716 upstream.
+
+Disabling interrupts for even a millisecond can cause problems for some
+devices. That can happen when Intel host controllers wait for the present
+state to propagate.
+
+The spin lock is not necessary here. Anything that is racing with changes
+to the I/O state is already broken. The mmc core already provides
+synchronization via "claiming" the host.
+
+Although the spin lock probably should be removed from the code paths that
+lead to this point, such a patch would touch too much code to be suitable
+for stable trees. Consequently, for this patch, just drop the spin lock
+while waiting.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Tested-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-pci-core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -452,6 +452,8 @@ static void sdhci_intel_set_power(struct
+ if (mode == MMC_POWER_OFF)
+ return;
+
++ spin_unlock_irq(&host->lock);
++
+ /*
+ * Bus power might not enable after D3 -> D0 transition due to the
+ * present state not yet having propagated. Retry for up to 2ms.
+@@ -464,6 +466,8 @@ static void sdhci_intel_set_power(struct
+ reg |= SDHCI_POWER_ON;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ }
++
++ spin_lock_irq(&host->lock);
+ }
+
+ static const struct sdhci_ops sdhci_intel_byt_ops = {
--- /dev/null
+From 4e841d3eb9294ce4137fdb5d0a88f1bceab9c212 Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Fri, 10 Mar 2017 17:39:21 -0800
+Subject: mwifiex: pcie: don't leak DMA buffers when removing
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 4e841d3eb9294ce4137fdb5d0a88f1bceab9c212 upstream.
+
+When PCIe FLR support was added, much of the remove/release code for
+PCIe was migrated to ->down_dev(), but ->down_dev() is never called for
+device removal. Let's refactor the cleanup to be done in both cases.
+
+Also, drop the comments above mwifiex_cleanup_pcie(), because they were
+clearly wrong, and it's better to have clear and obvious code than to
+detail the code steps in comments anyway.
+
+Fixes: 4c5dae59d2e9 ("mwifiex: add PCIe function level reset support")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 38 ++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -2742,6 +2742,21 @@ static void mwifiex_pcie_device_dump(str
+ schedule_work(&pcie_work);
+ }
+
++static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
++{
++ struct pcie_service_card *card = adapter->card;
++ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
++
++ if (reg->sleep_cookie)
++ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
++
++ mwifiex_pcie_delete_cmdrsp_buf(adapter);
++ mwifiex_pcie_delete_evtbd_ring(adapter);
++ mwifiex_pcie_delete_rxbd_ring(adapter);
++ mwifiex_pcie_delete_txbd_ring(adapter);
++ card->cmdrsp_buf = NULL;
++}
++
+ /*
+ * This function initializes the PCI-E host memory space, WCB rings, etc.
+ *
+@@ -2853,13 +2868,6 @@ err_enable_dev:
+
+ /*
+ * This function cleans up the allocated card buffers.
+- *
+- * The following are freed by this function -
+- * - TXBD ring buffers
+- * - RXBD ring buffers
+- * - Event BD ring buffers
+- * - Command response ring buffer
+- * - Sleep cookie buffer
+ */
+ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+ {
+@@ -2875,6 +2883,8 @@ static void mwifiex_pcie_cleanup(struct
+ "Failed to write driver not-ready signature\n");
+ }
+
++ mwifiex_pcie_free_buffers(adapter);
++
+ if (pdev) {
+ pci_iounmap(pdev, card->pci_mmap);
+ pci_iounmap(pdev, card->pci_mmap1);
+@@ -3115,10 +3125,7 @@ err_cre_txbd:
+ pci_iounmap(pdev, card->pci_mmap1);
+ }
+
+-/* This function cleans up the PCI-E host memory space.
+- * Some code is extracted from mwifiex_unregister_dev()
+- *
+- */
++/* This function cleans up the PCI-E host memory space. */
+ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
+ {
+ struct pcie_service_card *card = adapter->card;
+@@ -3130,14 +3137,7 @@ static void mwifiex_pcie_down_dev(struct
+ adapter->seq_num = 0;
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+- if (reg->sleep_cookie)
+- mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+-
+- mwifiex_pcie_delete_cmdrsp_buf(adapter);
+- mwifiex_pcie_delete_evtbd_ring(adapter);
+- mwifiex_pcie_delete_rxbd_ring(adapter);
+- mwifiex_pcie_delete_txbd_ring(adapter);
+- card->cmdrsp_buf = NULL;
++ mwifiex_pcie_free_buffers(adapter);
+ }
+
+ static struct mwifiex_if_ops pcie_ops = {
--- /dev/null
+From 9e10889a3177340dcda7d29c6d8fbd97247b007b Mon Sep 17 00:00:00 2001
+From: Romain Izard <romain.izard.pro@gmail.com>
+Date: Fri, 17 Feb 2017 16:12:50 +0100
+Subject: Revert "ARM: at91/dt: sama5d2: Use new compatible for ohci node"
+
+From: Romain Izard <romain.izard.pro@gmail.com>
+
+commit 9e10889a3177340dcda7d29c6d8fbd97247b007b upstream.
+
+This reverts commit cab43282682e ("ARM: at91/dt: sama5d2: Use new
+compatible for ohci node")
+
+It depends on commit 7150bc9b4d43 ("usb: ohci-at91: Forcibly suspend
+ports while USB suspend") which was reverted and implemented
+differently. With the new implementation, the compatible string must
+remain the same.
+
+The compatible string introduced by this commit has been used in the
+default SAMA5D2 dtsi starting from Linux 4.8. As it has never worked
+correctly in an official release, removing it should not break the
+stability rules.
+
+Fixes: cab43282682e ("ARM: at91/dt: sama5d2: Use new compatible for ohci node")
+Signed-off-by: Romain Izard <romain.izard.pro@gmail.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/sama5d2.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/sama5d2.dtsi
++++ b/arch/arm/boot/dts/sama5d2.dtsi
+@@ -266,7 +266,7 @@
+ };
+
+ usb1: ohci@00400000 {
+- compatible = "atmel,sama5d2-ohci", "usb-ohci";
++ compatible = "atmel,at91rm9200-ohci", "usb-ohci";
+ reg = <0x00400000 0x100000>;
+ interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
+ clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
ext4-mark-inode-dirty-after-converting-inline-directory.patch
ext4-lock-the-xattr-block-before-checksuming-it.patch
powerpc-64s-fix-idle-wakeup-potential-to-clobber-registers.patch
+audit-fix-auditd-kernel-connection-state-tracking.patch
+mmc-sdhci-of-at91-support-external-regulators.patch
+mmc-sdhci-of-arasan-fix-incorrect-timeout-clock.patch
+mmc-sdhci-do-not-disable-interrupts-while-waiting-for-clock.patch
+mmc-sdhci-pci-do-not-disable-interrupts-in-sdhci_intel_set_power.patch
+hwrng-amd-revert-managed-api-changes.patch
+hwrng-geode-revert-managed-api-changes.patch
+clk-sunxi-ng-sun6i-fix-enable-bit-offset-for-hdmi-ddc-module-clock.patch
+clk-sunxi-ng-mp-adjust-parent-rate-for-pre-dividers.patch
+mwifiex-pcie-don-t-leak-dma-buffers-when-removing.patch
+ath10k-fix-incorrect-wlan_mac_base-in-qca6174_regs.patch
+crypto-ccp-assign-dma-commands-to-the-channel-s-ccp.patch
+fscrypt-remove-broken-support-for-detecting-keyring-key-revocation.patch
+vfio-rework-group-release-notifier-warning.patch
+xen-acpi-upload-pm-state-from-init-domain-to-xen.patch
+iommu-vt-d-fix-null-pointer-dereference-in-device_to_iommu.patch
+iommu-exynos-block-sysmmu-while-invalidating-flpd-cache.patch
+iommu-exynos-workaround-flpd-cache-flush-issues-for-sysmmu-v5.patch
+revert-arm-at91-dt-sama5d2-use-new-compatible-for-ohci-node.patch
+arm-at91-pm-cpu_idle-switch-ddr-to-power-down-mode.patch
+arm64-kaslr-fix-up-the-kernel-image-alignment.patch
+cpufreq-restore-policy-min-max-limits-on-cpu-online.patch
+cgroup-net_cls-iterate-the-fds-of-only-the-tasks-which-are-being-migrated.patch
+blk-mq-don-t-complete-un-started-request-in-timeout-handler.patch
+libceph-force-gfp_noio-for-socket-allocations.patch
+cpsw-netcp-cpts-depends-on-posix_timers.patch
+drm-amdgpu-reinstate-oland-workaround-for-sclk.patch
+drm-amd-amdgpu-add-polaris12-pci-id.patch
+auxdisplay-img-ascii-lcd-add-missing-sentinel-entry-in-img_ascii_lcd_matches.patch
+jbd2-don-t-leak-memory-if-setting-up-journal-fails.patch
+intel_th-don-t-leak-module-refcount-on-failure-to-activate.patch
+drivers-hv-vmbus-don-t-leak-channel-ids.patch
+drivers-hv-vmbus-don-t-leak-memory-when-a-channel-is-rescinded.patch
--- /dev/null
+From 65b1adebfe43c642dfe3b109edb5d992db5fbe72 Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Tue, 21 Mar 2017 13:19:09 -0600
+Subject: vfio: Rework group release notifier warning
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit 65b1adebfe43c642dfe3b109edb5d992db5fbe72 upstream.
+
+The intent of the original warning is to make sure that the mdev
+vendor driver has removed any group notifiers by the point where the
+group is closed by the user. In theory this would happen through an
+orderly shutdown where the devices are released prior to the group
+release. We can't always count on an orderly shutdown, though: the
+user can close the group before the notifier can be removed, or the
+user task might be killed. We'd like to run this sanity test when the
+group is idle and the only references are from the devices within the
+group themselves, but we don't have a good way to do that. Instead,
+check both when the group itself is removed and when the group is
+opened. That is a bit later than we'd prefer, but better than the
+current overly aggressive approach.
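+
+The vendor-driver side of that expectation looks roughly like the
+hypothetical mdev sketch below (assuming the v4.10-era
+vfio_register_notifier() signature; names are illustrative only):
+
+	#include <linux/vfio.h>
+	#include <linux/notifier.h>
+
+	static int my_mdev_group_notifier(struct notifier_block *nb,
+					  unsigned long action, void *data)
+	{
+		/* e.g. react to VFIO_GROUP_NOTIFY_SET_KVM */
+		return NOTIFY_OK;
+	}
+
+	static struct notifier_block my_nb = {
+		.notifier_call = my_mdev_group_notifier,
+	};
+
+	static int my_mdev_open(struct device *dev)
+	{
+		unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+		return vfio_register_notifier(dev, VFIO_GROUP_NOTIFY,
+					      &events, &my_nb);
+	}
+
+	static void my_mdev_release(struct device *dev)
+	{
+		/* orderly shutdown: drop the notifier before the group goes */
+		vfio_unregister_notifier(dev, VFIO_GROUP_NOTIFY, &my_nb);
+	}
+
+Nothing forces the release path to run before the group fd is closed,
+which is why the warning has to tolerate a leftover notifier head.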
+
+Fixes: ccd46dbae77d ("vfio: support notifier chain in vfio_group")
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Cc: Jike Song <jike.song@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/vfio.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -403,6 +403,7 @@ static void vfio_group_release(struct kr
+ struct iommu_group *iommu_group = group->iommu_group;
+
+ WARN_ON(!list_empty(&group->device_list));
++ WARN_ON(group->notifier.head);
+
+ list_for_each_entry_safe(unbound, tmp,
+ &group->unbound_list, unbound_next) {
+@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct i
+ return -EBUSY;
+ }
+
++ /* Warn if previous user didn't cleanup and re-init to drop them */
++ if (WARN_ON(group->notifier.head))
++ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
++
+ filep->private_data = group;
+
+ return 0;
+@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struc
+
+ filep->private_data = NULL;
+
+- /* Any user didn't unregister? */
+- WARN_ON(group->notifier.head);
+-
+ vfio_group_try_dissolve_container(group);
+
+ atomic_dec(&group->opened);
--- /dev/null
+From 1914f0cd203c941bba72f9452c8290324f1ef3dc Mon Sep 17 00:00:00 2001
+From: Ankur Arora <ankur.a.arora@oracle.com>
+Date: Tue, 21 Mar 2017 15:43:38 -0700
+Subject: xen/acpi: upload PM state from init-domain to Xen
+
+From: Ankur Arora <ankur.a.arora@oracle.com>
+
+commit 1914f0cd203c941bba72f9452c8290324f1ef3dc upstream.
+
+This was broken in commit cd979883b9ed ("xen/acpi-processor:
+fix enabling interrupts on syscore_resume"): do_suspend() (from
+xen/manage.c), and thus xen_resume_notifier, never gets called on
+the initial domain at resume (it is called when running as a guest).
+
+The rationale for the breaking change was that upload_pm_data()
+potentially does blocking work in syscore_resume(). This patch
+addresses the original issue by scheduling upload_pm_data() to
+execute in workqueue context.
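+
+The shape of the fix is the usual defer-to-process-context pattern; a
+generic, self-contained sketch (illustrative only, names made up, not
+the exact driver code) looks like:
+
+	#include <linux/workqueue.h>
+	#include <linux/syscore_ops.h>
+
+	static void pm_upload_worker(struct work_struct *unused)
+	{
+		/* process context: may sleep, issue hypercalls, allocate */
+	}
+	static DECLARE_WORK(pm_upload_work, pm_upload_worker);
+
+	static void pm_syscore_resume(void)
+	{
+		/* atomic context, boot CPU only: just queue the work */
+		schedule_work(&pm_upload_work);
+	}
+
+	static struct syscore_ops pm_syscore_ops = {
+		.resume = pm_syscore_resume,
+	};
+
+register_syscore_ops()/unregister_syscore_ops() hook this up at module
+init/exit, exactly as the hunks below do for xap_syscore_ops.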
+
+Cc: Stanislaw Gruszka <sgruszka@redhat.com>
+Based-on-patch-by: Konrad Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/xen-acpi-processor.c | 34 ++++++++++++++++++++++++++--------
+ 1 file changed, 26 insertions(+), 8 deletions(-)
+
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -27,10 +27,10 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
++#include <linux/syscore_ops.h>
+ #include <linux/acpi.h>
+ #include <acpi/processor.h>
+ #include <xen/xen.h>
+-#include <xen/xen-ops.h>
+ #include <xen/interface/platform.h>
+ #include <asm/xen/hypercall.h>
+
+@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(
+ return rc;
+ }
+
+-static int xen_acpi_processor_resume(struct notifier_block *nb,
+- unsigned long action, void *data)
++static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
+ {
++ int rc;
++
+ bitmap_zero(acpi_ids_done, nr_acpi_bits);
+- return xen_upload_processor_pm_data();
++
++ rc = xen_upload_processor_pm_data();
++ if (rc != 0)
++ pr_info("ACPI data upload failed, error = %d\n", rc);
++}
++
++static void xen_acpi_processor_resume(void)
++{
++ static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
++
++ /*
++ * xen_upload_processor_pm_data() calls non-atomic code.
++ * However, the context for xen_acpi_processor_resume is syscore
++ * with only the boot CPU online and in an atomic context.
++ *
++ * So defer the upload for some point safer.
++ */
++ schedule_work(&wq);
+ }
+
+-struct notifier_block xen_acpi_processor_resume_nb = {
+- .notifier_call = xen_acpi_processor_resume,
++static struct syscore_ops xap_syscore_ops = {
++ .resume = xen_acpi_processor_resume,
+ };
+
+ static int __init xen_acpi_processor_init(void)
+@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_ini
+ if (rc)
+ goto err_unregister;
+
+- xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
++ register_syscore_ops(&xap_syscore_ops);
+
+ return 0;
+ err_unregister:
+@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_ex
+ {
+ int i;
+
+- xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
++ unregister_syscore_ops(&xap_syscore_ops);
+ kfree(acpi_ids_done);
+ kfree(acpi_id_present);
+ kfree(acpi_id_cst_present);