--- /dev/null
+From 4abb951b73ff0a8a979113ef185651aa3c8da19b Mon Sep 17 00:00:00 2001
+From: Erik Schmauss <erik.schmauss@intel.com>
+Date: Wed, 17 Oct 2018 14:09:35 -0700
+Subject: ACPICA: AML interpreter: add region addresses in global list during initialization
+
+From: Erik Schmauss <erik.schmauss@intel.com>
+
+commit 4abb951b73ff0a8a979113ef185651aa3c8da19b upstream.
+
+The table load process omitted adding the operation region address
+range to the global list. This omission is problematic because the OS
+queries the global list to check for address range conflicts before
+deciding which drivers to load. This commit may result in warning
+messages that look like the following:
+
+[ 7.871761] ACPI Warning: system_IO range 0x00000428-0x0000042F conflicts with op_region 0x00000400-0x0000047F (\PMIO) (20180531/utaddress-213)
+[ 7.871769] ACPI: If an ACPI driver is available for this device, you should use it instead of the native driver
+
+However, these messages do not signify regressions. It is a result of
+properly adding address ranges within the global address list.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=200011
+Tested-by: Jean-Marc Lenoir <archlinux@jihemel.com>
+Signed-off-by: Erik Schmauss <erik.schmauss@intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpica/dsopcode.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
+ obj_desc->region.length));
+
++ status = acpi_ut_add_address_range(obj_desc->region.space_id,
++ obj_desc->region.address,
++ obj_desc->region.length, node);
++
+ /* Now the address and length are valid for this opregion */
+
+ obj_desc->region.flags |= AOPOBJ_DATA_VALID;
--- /dev/null
+From b469e7e47c8a075cc08bcd1e85d4365134bdcdd5 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Tue, 30 Oct 2018 20:29:53 +0200
+Subject: fanotify: fix handling of events on child sub-directory
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit b469e7e47c8a075cc08bcd1e85d4365134bdcdd5 upstream.
+
+When an event is reported on a sub-directory and the parent inode has
+a mark mask with FS_EVENT_ON_CHILD|FS_ISDIR, the event will be sent to
+fsnotify() even if the event type is not in the parent mark mask
+(e.g. FS_OPEN).
+
+Furthermore, if that event happened on a mount or a filesystem with
+a mount/sb mark that does have that event type in their mask, the "on
+child" event will be reported on the mount/sb mark. That is not
+desired, because user will get a duplicate event for the same action.
+
+Note that the event reported on the victim inode is never merged with
+the event reported on the parent inode, because of the check in
+should_merge(): old_fsn->inode == new_fsn->inode.
+
+Fix this by looking for a match of an actual event type (i.e. not just
+FS_ISDIR) in parent's inode mark mask and by not reporting an "on child"
+event to group if event type is only found on mount/sb marks.
+
+[backport hint: The bug seems to have always been in fanotify, but this
+ patch will only apply cleanly to v4.19.y]
+
+Cc: <stable@vger.kernel.org> # v4.19
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+[amir: backport to v4.19]
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fanotify/fanotify.c | 10 +++++-----
+ fs/notify/fsnotify.c | 8 ++++++--
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(s
+ continue;
+ mark = iter_info->marks[type];
+ /*
+- * if the event is for a child and this inode doesn't care about
+- * events on the child, don't send it!
++ * If the event is for a child and this mark doesn't care about
++ * events on a child, don't send it!
+ */
+- if (type == FSNOTIFY_OBJ_TYPE_INODE &&
+- (event_mask & FS_EVENT_ON_CHILD) &&
+- !(mark->mask & FS_EVENT_ON_CHILD))
++ if (event_mask & FS_EVENT_ON_CHILD &&
++ (type != FSNOTIFY_OBJ_TYPE_INODE ||
++ !(mark->mask & FS_EVENT_ON_CHILD)))
+ continue;
+
+ marks_mask |= mark->mask;
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -161,9 +161,9 @@ int __fsnotify_parent(const struct path
+ parent = dget_parent(dentry);
+ p_inode = parent->d_inode;
+
+- if (unlikely(!fsnotify_inode_watches_children(p_inode)))
++ if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
+ __fsnotify_update_child_dentry_flags(p_inode);
+- else if (p_inode->i_fsnotify_mask & mask) {
++ } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
+ struct name_snapshot name;
+
+ /* we are notifying a parent so come up with the new mask which
+@@ -332,6 +332,10 @@ int fsnotify(struct inode *to_tell, __u3
+ else
+ mnt = NULL;
+
++ /* An event "on child" is not intended for a mount mark */
++ if (mask & FS_EVENT_ON_CHILD)
++ mnt = NULL;
++
+ /*
+ * Optimization: srcu_read_lock() has a memory barrier which can
+ * be expensive. It protects walking the *_fsnotify_marks lists.
--- /dev/null
+From a0e0cb82804a6a21d9067022c2dfdf80d11da429 Mon Sep 17 00:00:00 2001
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Mon, 10 Sep 2018 09:39:03 -0700
+Subject: IB/hfi1: Eliminate races in the SDMA send error path
+
+From: Michael J. Ruhl <michael.j.ruhl@intel.com>
+
+commit a0e0cb82804a6a21d9067022c2dfdf80d11da429 upstream.
+
+pq_update() can only be called in two places: from the completion
+function when the complete (npkts) sequence of packets has been
+submitted and processed, or from setup function if a subset of the
+packets were submitted (i.e. the error path).
+
+Currently both paths can call pq_update() if an error occurs. This
+race will cause the n_req value to go negative, hanging file_close(),
+or cause a crash by freeing the txlist more than once.
+
+Several variables are used to determine SDMA send state. Most of
+these are unnecessary, and have code inspectable races between the
+setup function and the completion function, in both the send path and
+the error path.
+
+The request 'status' value can be set by the setup or by the
+completion function. This is code inspectably racy. Since the status
+is not needed in the completion code or by the caller it has been
+removed.
+
+The request 'done' value races between usage by the setup and the
+completion function. The completion function does not need this.
+When the number of processed packets matches npkts, it is done.
+
+The 'has_error' value races between usage of the setup and the
+completion function. This can cause incorrect error handling and leave
+the n_req in an incorrect value (i.e. negative).
+
+Simplify the code by removing all of the unneeded state checks and
+variables.
+
+Clean up iovs node when it is freed.
+
+Eliminate race conditions in the error path:
+
+If all packets are submitted, the completion handler will set the
+completion status correctly (ok or aborted).
+
+If all packets are not submitted, the caller must wait until the
+submitted packets have completed, and then set the completion status.
+
+These two changes eliminate the race condition in the error path.
+
+Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/user_sdma.c | 85 ++++++++++++++-------------------
+ drivers/infiniband/hw/hfi1/user_sdma.h | 3 -
+ 2 files changed, 38 insertions(+), 50 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -328,7 +328,6 @@ int hfi1_user_sdma_process_request(struc
+ u8 opcode, sc, vl;
+ u16 pkey;
+ u32 slid;
+- int req_queued = 0;
+ u16 dlid;
+ u32 selector;
+
+@@ -392,7 +391,6 @@ int hfi1_user_sdma_process_request(struc
+ req->data_len = 0;
+ req->pq = pq;
+ req->cq = cq;
+- req->status = -1;
+ req->ahg_idx = -1;
+ req->iov_idx = 0;
+ req->sent = 0;
+@@ -400,12 +398,14 @@ int hfi1_user_sdma_process_request(struc
+ req->seqcomp = 0;
+ req->seqsubmitted = 0;
+ req->tids = NULL;
+- req->done = 0;
+ req->has_error = 0;
+ INIT_LIST_HEAD(&req->txps);
+
+ memcpy(&req->info, &info, sizeof(info));
+
++ /* The request is initialized, count it */
++ atomic_inc(&pq->n_reqs);
++
+ if (req_opcode(info.ctrl) == EXPECTED) {
+ /* expected must have a TID info and at least one data vector */
+ if (req->data_iovs < 2) {
+@@ -500,7 +500,6 @@ int hfi1_user_sdma_process_request(struc
+ ret = pin_vector_pages(req, &req->iovs[i]);
+ if (ret) {
+ req->data_iovs = i;
+- req->status = ret;
+ goto free_req;
+ }
+ req->data_len += req->iovs[i].iov.iov_len;
+@@ -561,14 +560,10 @@ int hfi1_user_sdma_process_request(struc
+ req->ahg_idx = sdma_ahg_alloc(req->sde);
+
+ set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+- atomic_inc(&pq->n_reqs);
+- req_queued = 1;
+ /* Send the first N packets in the request to buy us some time */
+ ret = user_sdma_send_pkts(req, pcount);
+- if (unlikely(ret < 0 && ret != -EBUSY)) {
+- req->status = ret;
++ if (unlikely(ret < 0 && ret != -EBUSY))
+ goto free_req;
+- }
+
+ /*
+ * It is possible that the SDMA engine would have processed all the
+@@ -588,14 +583,8 @@ int hfi1_user_sdma_process_request(struc
+ while (req->seqsubmitted != req->info.npkts) {
+ ret = user_sdma_send_pkts(req, pcount);
+ if (ret < 0) {
+- if (ret != -EBUSY) {
+- req->status = ret;
+- WRITE_ONCE(req->has_error, 1);
+- if (READ_ONCE(req->seqcomp) ==
+- req->seqsubmitted - 1)
+- goto free_req;
+- return ret;
+- }
++ if (ret != -EBUSY)
++ goto free_req;
+ wait_event_interruptible_timeout(
+ pq->busy.wait_dma,
+ (pq->state == SDMA_PKT_Q_ACTIVE),
+@@ -606,10 +595,19 @@ int hfi1_user_sdma_process_request(struc
+ *count += idx;
+ return 0;
+ free_req:
+- user_sdma_free_request(req, true);
+- if (req_queued)
++ /*
++ * If the submitted seqsubmitted == npkts, the completion routine
++ * controls the final state. If sequbmitted < npkts, wait for any
++ * outstanding packets to finish before cleaning up.
++ */
++ if (req->seqsubmitted < req->info.npkts) {
++ if (req->seqsubmitted)
++ wait_event(pq->busy.wait_dma,
++ (req->seqcomp == req->seqsubmitted - 1));
++ user_sdma_free_request(req, true);
+ pq_update(pq);
+- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
++ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
++ }
+ return ret;
+ }
+
+@@ -917,7 +915,6 @@ dosend:
+ ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ req->seqsubmitted += count;
+ if (req->seqsubmitted == req->info.npkts) {
+- WRITE_ONCE(req->done, 1);
+ /*
+ * The txreq has already been submitted to the HW queue
+ * so we can free the AHG entry now. Corruption will not
+@@ -1365,11 +1362,15 @@ static int set_txreq_header_ahg(struct u
+ return idx;
+ }
+
+-/*
+- * SDMA tx request completion callback. Called when the SDMA progress
+- * state machine gets notification that the SDMA descriptors for this
+- * tx request have been processed by the DMA engine. Called in
+- * interrupt context.
++/**
++ * user_sdma_txreq_cb() - SDMA tx request completion callback.
++ * @txreq: valid sdma tx request
++ * @status: success/failure of request
++ *
++ * Called when the SDMA progress state machine gets notification that
++ * the SDMA descriptors for this tx request have been processed by the
++ * DMA engine. Called in interrupt context.
++ * Only do work on completed sequences.
+ */
+ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ {
+@@ -1378,7 +1379,7 @@ static void user_sdma_txreq_cb(struct sd
+ struct user_sdma_request *req;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+- u16 idx;
++ enum hfi1_sdma_comp_state state = COMPLETE;
+
+ if (!tx->req)
+ return;
+@@ -1391,31 +1392,19 @@ static void user_sdma_txreq_cb(struct sd
+ SDMA_DBG(req, "SDMA completion with error %d",
+ status);
+ WRITE_ONCE(req->has_error, 1);
++ state = ERROR;
+ }
+
+ req->seqcomp = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+- tx = NULL;
+
+- idx = req->info.comp_idx;
+- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
+- if (req->seqcomp == req->info.npkts - 1) {
+- req->status = 0;
+- user_sdma_free_request(req, false);
+- pq_update(pq);
+- set_comp_state(pq, cq, idx, COMPLETE, 0);
+- }
+- } else {
+- if (status != SDMA_TXREQ_S_OK)
+- req->status = status;
+- if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
+- (READ_ONCE(req->done) ||
+- READ_ONCE(req->has_error))) {
+- user_sdma_free_request(req, false);
+- pq_update(pq);
+- set_comp_state(pq, cq, idx, ERROR, req->status);
+- }
+- }
++ /* sequence isn't complete? We are done */
++ if (req->seqcomp != req->info.npkts - 1)
++ return;
++
++ user_sdma_free_request(req, false);
++ set_comp_state(pq, cq, req->info.comp_idx, state, status);
++ pq_update(pq);
+ }
+
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+@@ -1448,6 +1437,8 @@ static void user_sdma_free_request(struc
+ if (!node)
+ continue;
+
++ req->iovs[i].node = NULL;
++
+ if (unpin)
+ hfi1_mmu_rb_remove(req->pq->handler,
+ &node->rb);
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -205,8 +205,6 @@ struct user_sdma_request {
+ /* Writeable fields shared with interrupt */
+ u64 seqcomp ____cacheline_aligned_in_smp;
+ u64 seqsubmitted;
+- /* status of the last txreq completed */
+- int status;
+
+ /* Send side fields */
+ struct list_head txps ____cacheline_aligned_in_smp;
+@@ -228,7 +226,6 @@ struct user_sdma_request {
+ u16 tididx;
+ /* progress index moving along the iovs array */
+ u8 iov_idx;
+- u8 done;
+ u8 has_error;
+
+ struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];