--- /dev/null
+From 392a17b10ec4320d3c0e96e2a23ebaad1123b989 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <cminyard@mvista.com>
+Date: Sat, 29 Jul 2017 21:14:55 -0500
+Subject: ipmi: fix unsigned long underflow
+
+From: Corey Minyard <cminyard@mvista.com>
+
+commit 392a17b10ec4320d3c0e96e2a23ebaad1123b989 upstream.
+
+When I set the timeout to a specific value such as 500ms, the timeout
+event will not happen in time due to the unsigned underflow in function
+check_msg_timeout:
+...
+	ent->timeout -= timeout_period;
+	if (ent->timeout > 0)
+		return;
+...
+
+The type of timeout_period is long, but ent->timeout is unsigned long,
+so when timeout_period exceeds ent->timeout the subtraction wraps around
+to a huge unsigned value and the "> 0" test never fires. This patch
+makes the types consistent and compares before subtracting.
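+
+Below is a minimal userspace sketch (not the driver code) of the
+wraparound, and of the compare-before-subtract pattern the fix adopts;
+all values are illustrative:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned long timeout = 100;  /* ms left, like ent->timeout */
+      unsigned long period = 500;   /* ms elapsed, like timeout_period */
+
+      timeout -= period;            /* 100 - 500 wraps to a huge value */
+      if (timeout > 0)              /* always true for unsigned */
+          printf("broken: still waiting (timeout=%lu)\n", timeout);
+
+      /* Fixed pattern: compare before subtracting. */
+      unsigned long t = 100, p = 500;
+      if (p < t)
+          t -= p;                   /* still pending */
+      else
+          printf("fixed: timed out\n");
+      return 0;
+  }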
+
+Reported-by: Weilong Chen <chenweilong@huawei.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Weilong Chen <chenweilong@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4029,7 +4029,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struc
+ }
+
+ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+- struct list_head *timeouts, long timeout_period,
++ struct list_head *timeouts,
++ unsigned long timeout_period,
+ int slot, unsigned long *flags,
+ unsigned int *waiting_msgs)
+ {
+@@ -4042,8 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t
+ if (!ent->inuse)
+ return;
+
+- ent->timeout -= timeout_period;
+- if (ent->timeout > 0) {
++ if (timeout_period < ent->timeout) {
++ ent->timeout -= timeout_period;
+ (*waiting_msgs)++;
+ return;
+ }
+@@ -4109,7 +4110,8 @@ static void check_msg_timeout(ipmi_smi_t
+ }
+ }
+
+-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
++static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
++ unsigned long timeout_period)
+ {
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
--- /dev/null
+From d135e5750205a21a212a19dbb05aeb339e2cbea7 Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+Date: Wed, 15 Nov 2017 17:38:41 -0800
+Subject: mm/page_alloc.c: broken deferred calculation
+
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+
+commit d135e5750205a21a212a19dbb05aeb339e2cbea7 upstream.
+
+In reset_deferred_meminit() we determine the number of pages that must
+not be deferred. We initialize pages for at least 2G of memory, but
+also pages for the reserved memory in this node.
+
+The reserved memory is determined by memblock_reserved_memory_within(),
+which operates on physical addresses and returns a size in bytes.
+However, reset_deferred_meminit() assumed that this function operates
+with pfns and returns a page count.
+
+The result is that in the best case the machine boots slower than
+expected due to initializing more pages than needed in a single thread,
+and in the worst case it panics because fewer pages than needed are
+initialized early.
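+
+Below is a minimal userspace sketch of the unit mismatch (not the kernel
+code); PFN_PHYS() and PHYS_PFN() are re-derived here for illustration,
+and the 64M reservation is made up:
+
+  #include <stdio.h>
+
+  #define PAGE_SHIFT 12
+  #define PFN_PHYS(pfn)  ((unsigned long long)(pfn) << PAGE_SHIFT)
+  #define PHYS_PFN(addr) ((unsigned long)((addr) >> PAGE_SHIFT))
+
+  int main(void)
+  {
+      unsigned long long reserved_bytes = 64ULL << 20;
+
+      /* Broken: a byte count added to a page count overstates the
+       * number of non-deferred pages by a factor of PAGE_SIZE. */
+      unsigned long wrong_pages = (unsigned long)reserved_bytes;
+
+      /* Fixed: convert bytes to pages first, as the patch does with
+       * PHYS_PFN(reserved); arguments likewise go in as physical
+       * addresses via PFN_PHYS(). */
+      unsigned long right_pages = PHYS_PFN(reserved_bytes);
+
+      printf("wrong: %lu pages, right: %lu pages\n",
+             wrong_pages, right_pages);
+      return 0;
+  }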
+
+Link: http://lkml.kernel.org/r/20171021011707.15191-1-pasha.tatashin@oracle.com
+Fixes: 864b9a393dcb ("mm: consider memblock reservations for deferred memory initialization sizing")
+Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mmzone.h | 3 ++-
+ mm/page_alloc.c | 27 ++++++++++++++++++---------
+ 2 files changed, 20 insertions(+), 10 deletions(-)
+
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -688,7 +688,8 @@ typedef struct pglist_data {
+ * is the first PFN that needs to be initialised.
+ */
+ unsigned long first_deferred_pfn;
+- unsigned long static_init_size;
++ /* Number of non-deferred pages */
++ unsigned long static_init_pgcnt;
+ #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+ } pg_data_t;
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -267,28 +267,37 @@ EXPORT_SYMBOL(nr_online_nodes);
+ int page_group_by_mobility_disabled __read_mostly;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
++
++/*
++ * Determine how many pages need to be initialized during early boot
++ * (non-deferred initialization).
++ * The value of first_deferred_pfn will be set later, once non-deferred
++ * pages are initialized, but for now set it to ULONG_MAX.
++ */
+ static inline void reset_deferred_meminit(pg_data_t *pgdat)
+ {
+- unsigned long max_initialise;
+- unsigned long reserved_lowmem;
++ phys_addr_t start_addr, end_addr;
++ unsigned long max_pgcnt;
++ unsigned long reserved;
+
+ /*
+ * Initialise at least 2G of a node but also take into account that
+ * two large system hashes that can take up 1GB for 0.25TB/node.
+ */
+- max_initialise = max(2UL << (30 - PAGE_SHIFT),
+- (pgdat->node_spanned_pages >> 8));
++ max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
++ (pgdat->node_spanned_pages >> 8));
+
+ /*
+ * Compensate the all the memblock reservations (e.g. crash kernel)
+ * from the initial estimation to make sure we will initialize enough
+ * memory to boot.
+ */
+- reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+- pgdat->node_start_pfn + max_initialise);
+- max_initialise += reserved_lowmem;
++ start_addr = PFN_PHYS(pgdat->node_start_pfn);
++ end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
++ reserved = memblock_reserved_memory_within(start_addr, end_addr);
++ max_pgcnt += PHYS_PFN(reserved);
+
+- pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
++ pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
+ pgdat->first_deferred_pfn = ULONG_MAX;
+ }
+
+@@ -324,7 +333,7 @@ static inline bool update_defer_init(pg_
+ return true;
+ /* Initialise at least 2G of the highest zone */
+ (*nr_initialised)++;
+- if ((*nr_initialised > pgdat->static_init_size) &&
++ if ((*nr_initialised > pgdat->static_init_pgcnt) &&
+ (pfn & (PAGES_PER_SECTION - 1)) == 0) {
+ pgdat->first_deferred_pfn = pfn;
+ return false;
--- /dev/null
+From keith.busch@intel.com Tue Nov 21 17:57:03 2017
+From: Keith Busch <keith.busch@intel.com>
+Date: Thu, 16 Nov 2017 16:57:10 -0700
+Subject: [PATCH-stable] nvme: Fix memory order on async queue deletion
+To: linux-nvme@lists.infradead.org, stable@vger.kernel.org
+Cc: Greg KH <gregkh@linuxfoundation.org>, Keith Busch <keith.busch@intel.com>
+Message-ID: <20171116235710.20224-1-keith.busch@intel.com>
+
+From: Keith Busch <keith.busch@intel.com>
+
+This patch is a fix specific to the 3.19 - 4.4 kernels. The 4.5 kernel
+inadvertently fixed this bug differently (db3cbfff5bcc0), but that fix
+is not a stable candidate because it is a complicated rewrite of the
+entire feature.
+
+This patch fixes a potential timing bug in nvme's asynchronous queue
+deletion, which can cause an allocated request to be accidentally
+released because of how the completion context is shared between the
+sq/cq pair. The completion context saves the request that issued the
+queue deletion. If the submission-side deletion happens to reset the
+active request, the completion side will release the wrong request tag
+back into the pool of available tags. This means the driver will create
+multiple commands with the same tag, corrupting the queue context.
+
+The error is observable in the kernel log as messages like:
+
+ "nvme XX:YY:ZZ completed id XX twice on qid:0"
+
+In this particular case, the message occurs because the queue is
+corrupted.
+
+The following timing sequence demonstrates the error:
+
+  CPU A                                  CPU B
+  -----------------------                -----------------------------
+  nvme_irq
+   nvme_process_cq
+    async_completion
+     queue_kthread_work  ----------->    nvme_del_sq_work_handler
+                                          nvme_delete_cq
+                                           adapter_async_del_queue
+                                            nvme_submit_admin_async_cmd
+                                             cmdinfo->req = req;
+
+     blk_mq_free_request(cmdinfo->req);  <-- wrong request!!!
+
+This patch fixes the bug by releasing the request on the completion side
+prior to waking the submission thread, so that the submission thread
+can't muck with the shared completion context.
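+
+Below is a minimal userspace sketch (not the driver code) of that
+ordering rule: finish using the shared context before waking the worker
+that may overwrite it. All names are illustrative, and pthreads stand
+in for the kthread worker:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  struct cmdinfo {
+      int req;                   /* stands in for cmdinfo->req */
+      pthread_mutex_t lock;
+      pthread_cond_t cond;
+      int wake;
+  };
+
+  static void *del_queue_worker(void *arg)
+  {
+      struct cmdinfo *ci = arg;
+
+      pthread_mutex_lock(&ci->lock);
+      while (!ci->wake)
+          pthread_cond_wait(&ci->cond, &ci->lock);
+      ci->req = 42;              /* next deletion resets the request */
+      pthread_mutex_unlock(&ci->lock);
+      return NULL;
+  }
+
+  int main(void)
+  {
+      struct cmdinfo ci = {
+          .req = 7,
+          .lock = PTHREAD_MUTEX_INITIALIZER,
+          .cond = PTHREAD_COND_INITIALIZER,
+      };
+      pthread_t t;
+
+      pthread_create(&t, NULL, del_queue_worker, &ci);
+
+      /* Correct order: consume ci.req (the "free") first... */
+      printf("freeing request %d\n", ci.req);
+
+      /* ...and only then wake the worker, which may reuse ci.req. */
+      pthread_mutex_lock(&ci.lock);
+      ci.wake = 1;
+      pthread_cond_signal(&ci.cond);
+      pthread_mutex_unlock(&ci.lock);
+
+      pthread_join(t, NULL);
+      return 0;
+  }
+
+Build with -pthread; with the two steps swapped, the printf races with
+the worker's write and can report the wrong request.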
+
+Fixes: a4aea5623d4a5 ("NVMe: Convert to blk-mq")
+
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -350,8 +350,8 @@ static void async_completion(struct nvme
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+- queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+ blk_mq_free_request(cmdinfo->req);
++ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+ }
+
+ static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
--- /dev/null
+From 28f5a8a7c033cbf3e32277f4cc9c6afd74f05300 Mon Sep 17 00:00:00 2001
+From: alex chen <alex.chen@huawei.com>
+Date: Wed, 15 Nov 2017 17:31:40 -0800
+Subject: ocfs2: should wait dio before inode lock in ocfs2_setattr()
+
+From: alex chen <alex.chen@huawei.com>
+
+commit 28f5a8a7c033cbf3e32277f4cc9c6afd74f05300 upstream.
+
+We should wait for dio requests to finish before taking the inode lock
+in ocfs2_setattr(); otherwise the following deadlock can happen:
+
+process 1              process 2                   process 3
+truncate file 'A'      end_io of writing file 'A'  receiving the bast messages
+ocfs2_setattr
+ ocfs2_inode_lock_tracker
+  ocfs2_inode_lock_full
+ inode_dio_wait
+  __inode_dio_wait
+   -->waiting for all dio
+      requests to finish
+                                                   dlm_proxy_ast_handler
+                                                    dlm_do_local_bast
+                                                     ocfs2_blocking_ast
+                                                      ocfs2_generic_handle_bast
+                                                       set OCFS2_LOCK_BLOCKED flag
+                       dio_end_io
+                        dio_bio_end_aio
+                         dio_complete
+                          ocfs2_dio_end_io
+                           ocfs2_dio_end_io_write
+                            ocfs2_inode_lock
+                             __ocfs2_cluster_lock
+                              ocfs2_wait_for_mask
+                              -->waiting for OCFS2_LOCK_BLOCKED
+                                 flag to be cleared, that is waiting
+                                 for 'process 1' to unlock the inode lock
+                           inode_dio_end
+                           -->here we dec the i_dio_count, but it will
+                              never be called, so a deadlock happens.
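+
+Below is a minimal userspace sketch of the ordering the patch enforces,
+with pthreads standing in for the cluster lock and i_dio_count; all
+names are illustrative:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_cond_t count_zero = PTHREAD_COND_INITIALIZER;
+  static int dio_count = 1;          /* one dio in flight */
+
+  static void *dio_end_io(void *arg)
+  {
+      /* Completion path: needs the inode lock before it can drop
+       * the dio count (as ocfs2_dio_end_io_write() does). */
+      pthread_mutex_lock(&inode_lock);
+      pthread_mutex_unlock(&inode_lock);
+
+      pthread_mutex_lock(&count_lock);
+      dio_count--;                   /* inode_dio_end() analogue */
+      pthread_cond_signal(&count_zero);
+      pthread_mutex_unlock(&count_lock);
+      return NULL;
+  }
+
+  int main(void)
+  {
+      pthread_t t;
+
+      pthread_create(&t, NULL, dio_end_io, NULL);
+
+      /* Fixed order: drain dio first (inode_dio_wait() analogue)... */
+      pthread_mutex_lock(&count_lock);
+      while (dio_count > 0)
+          pthread_cond_wait(&count_zero, &count_lock);
+      pthread_mutex_unlock(&count_lock);
+
+      /* ...then take the inode lock for the truncate.  Taking the
+       * lock before the drain would block dio_end_io() forever:
+       * the deadlock shown above. */
+      pthread_mutex_lock(&inode_lock);
+      puts("setattr proceeds without deadlock");
+      pthread_mutex_unlock(&inode_lock);
+
+      pthread_join(t, NULL);
+      return 0;
+  }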
+
+Link: http://lkml.kernel.org/r/59F81636.70508@huawei.com
+Signed-off-by: Alex Chen <alex.chen@huawei.com>
+Reviewed-by: Jun Piao <piaojun@huawei.com>
+Reviewed-by: Joseph Qi <jiangqi903@gmail.com>
+Acked-by: Changwei Ge <ge.changwei@h3c.com>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/file.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1166,6 +1166,13 @@ int ocfs2_setattr(struct dentry *dentry,
+ }
+ size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
+ if (size_change) {
++ /*
++	 * Here we should wait for dio to finish before taking the
++	 * inode lock, to avoid a deadlock between ocfs2_setattr()
++	 * and ocfs2_dio_end_io_write().
++ */
++ inode_dio_wait(inode);
++
+ status = ocfs2_rw_lock(inode, 1);
+ if (status < 0) {
+ mlog_errno(status);
+@@ -1186,8 +1193,6 @@ int ocfs2_setattr(struct dentry *dentry,
+ if (status)
+ goto bail_unlock;
+
+- inode_dio_wait(inode);
+-
+ if (i_size_read(inode) >= attr->ia_size) {
+ if (ocfs2_should_order_data(inode)) {
+ status = ocfs2_begin_ordered_truncate(inode,
ima-do-not-update-security.ima-if-appraisal-status-is-not-integrity_pass.patch
serial-omap-fix-efr-write-on-rts-deassertion.patch
arm64-fix-dump_instr-when-pan-and-uao-are-in-use.patch
+nvme-fix-memory-order-on-async-queue-deletion.patch
+ocfs2-should-wait-dio-before-inode-lock-in-ocfs2_setattr.patch
+ipmi-fix-unsigned-long-underflow.patch
+mm-page_alloc.c-broken-deferred-calculation.patch