--- /dev/null
+From d3abaf43bab8d5b0a3c6b982100d9e2be96de4ad Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Sat, 13 Oct 2018 20:32:17 -0700
+Subject: acpi, nfit: Fix Address Range Scrub completion tracking
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit d3abaf43bab8d5b0a3c6b982100d9e2be96de4ad upstream.
+
+The Address Range Scrub implementation tried to skip running scrubs
+against ranges that were already scrubbed by the BIOS. Unfortunately
+that support also resulted in early scrub completions as evidenced by
+this debug output from nfit_test:
+
+ nd_region region9: ARS: range 1 short complete
+ nd_region region3: ARS: range 1 short complete
+ nd_region region4: ARS: range 2 ARS start (0)
+ nd_region region4: ARS: range 2 short complete
+
+...i.e. completions without any indications that the scrub was started.
+
+This state of affairs was hard to see in the code due to the
+proliferation of state bits and mistakenly trying to track done state
+per-range when the completion is a global property of the bus.
+
+So, kill the four ARS state bits (ARS_REQ, ARS_REQ_REDO, ARS_DONE, and
+ARS_SHORT), and replace them with just two request flags, ARS_REQ_SHORT and
+ARS_REQ_LONG. The implementation will still complete and reap the
+results of BIOS initiated ARS, but it will not attempt to use that
+information to affect the completion status of scrubbing the ranges from
+a Linux perspective.
+
+Instead, try to synchronously run a short ARS per range at init time and
+schedule a long scrub in the background. If ARS is busy with an ARS
+request, schedule both a short and a long scrub for when ARS returns to
+idle. This logic also satisfies the intent of what ARS_REQ_REDO was
+trying to achieve. The new rule is that the REQ flag stays set until the
+next successful ars_start() for that range.
+
+With the new policy that the REQ flags are not cleared until the next
+start, the implementation no longer loses requests as can be seen from
+the following log:
+
+ nd_region region3: ARS: range 1 ARS start short (0)
+ nd_region region9: ARS: range 1 ARS start short (0)
+ nd_region region3: ARS: range 1 complete
+ nd_region region4: ARS: range 2 ARS start short (0)
+ nd_region region9: ARS: range 1 complete
+ nd_region region9: ARS: range 1 ARS start long (0)
+ nd_region region4: ARS: range 2 complete
+ nd_region region3: ARS: range 1 ARS start long (0)
+ nd_region region9: ARS: range 1 complete
+ nd_region region3: ARS: range 1 complete
+ nd_region region4: ARS: range 2 ARS start long (0)
+ nd_region region4: ARS: range 2 complete
+
+...note that the nfit_test emulated driver provides 2 buses, which is
+why some of the range indices are duplicated. Notice that each range
+now successfully completes a short and a long scrub.
+
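+As a rough condensation of the scrub-selection policy implemented in
+the diff below (a sketch, not the literal driver code):
+
+    /* prefer short ARS requests over long ones */
+    if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
+        req_type = ARS_REQ_SHORT;
+    else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
+        req_type = ARS_REQ_LONG;
+    else
+        continue;
+
+    rc = ars_start(acpi_desc, nfit_spa, req_type);
+    if (rc == 0)
+        /* only a successful start clears the request flag */
+        clear_bit(req_type, &nfit_spa->ars_state);
+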
+Cc: <stable@vger.kernel.org>
+Fixes: 14c73f997a5e ("nfit, address-range-scrub: introduce nfit_spa->ars_state")
+Fixes: cc3d3458d46f ("acpi/nfit: queue issuing of ars when an uc error...")
+Reported-by: Jacek Zloch <jacek.zloch@intel.com>
+Reported-by: Krzysztof Rusocki <krzysztof.rusocki@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/nfit/core.c | 167 ++++++++++++++++++++++++++---------------------
+ drivers/acpi/nfit/nfit.h | 10 +-
+ 2 files changed, 100 insertions(+), 77 deletions(-)
+
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -2466,7 +2466,8 @@ static int ars_get_cap(struct acpi_nfit_
+ return cmd_rc;
+ }
+
+-static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
++static int ars_start(struct acpi_nfit_desc *acpi_desc,
++ struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
+ {
+ int rc;
+ int cmd_rc;
+@@ -2477,7 +2478,7 @@ static int ars_start(struct acpi_nfit_de
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = spa->address;
+ ars_start.length = spa->length;
+- if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
++ if (req_type == ARS_REQ_SHORT)
+ ars_start.flags = ND_ARS_RETURN_PREV_DATA;
+ if (nfit_spa_type(spa) == NFIT_SPA_PM)
+ ars_start.type = ND_ARS_PERSISTENT;
+@@ -2534,6 +2535,15 @@ static void ars_complete(struct acpi_nfi
+ struct nd_region *nd_region = nfit_spa->nd_region;
+ struct device *dev;
+
++ lockdep_assert_held(&acpi_desc->init_mutex);
++ /*
++ * Only advance the ARS state for ARS runs initiated by the
++ * kernel, ignore ARS results from BIOS initiated runs for scrub
++ * completion tracking.
++ */
++ if (acpi_desc->scrub_spa != nfit_spa)
++ return;
++
+ if ((ars_status->address >= spa->address && ars_status->address
+ < spa->address + spa->length)
+ || (ars_status->address < spa->address)) {
+@@ -2553,28 +2563,13 @@ static void ars_complete(struct acpi_nfi
+ } else
+ return;
+
+- if (test_bit(ARS_DONE, &nfit_spa->ars_state))
+- return;
+-
+- if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
+- return;
+-
++ acpi_desc->scrub_spa = NULL;
+ if (nd_region) {
+ dev = nd_region_dev(nd_region);
+ nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
+ } else
+ dev = acpi_desc->dev;
+-
+- dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
+- test_bit(ARS_SHORT, &nfit_spa->ars_state)
+- ? "short" : "long");
+- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
+- if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
+- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+- set_bit(ARS_REQ, &nfit_spa->ars_state);
+- dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
+- } else
+- set_bit(ARS_DONE, &nfit_spa->ars_state);
++ dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
+ }
+
+ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
+@@ -2855,46 +2850,55 @@ static int acpi_nfit_query_poison(struct
+ return 0;
+ }
+
+-static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
+- int *query_rc)
++static int ars_register(struct acpi_nfit_desc *acpi_desc,
++ struct nfit_spa *nfit_spa)
+ {
+- int rc = *query_rc;
++ int rc;
+
+- if (no_init_ars)
++ if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ return acpi_nfit_register_region(acpi_desc, nfit_spa);
+
+- set_bit(ARS_REQ, &nfit_spa->ars_state);
+- set_bit(ARS_SHORT, &nfit_spa->ars_state);
++ set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
++ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
+
+- switch (rc) {
++ switch (acpi_nfit_query_poison(acpi_desc)) {
+ case 0:
+ case -EAGAIN:
+- rc = ars_start(acpi_desc, nfit_spa);
+- if (rc == -EBUSY) {
+- *query_rc = rc;
++ rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
++ /* shouldn't happen, try again later */
++ if (rc == -EBUSY)
+ break;
+- } else if (rc == 0) {
+- rc = acpi_nfit_query_poison(acpi_desc);
+- } else {
++ if (rc) {
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
+ break;
+ }
+- if (rc == -EAGAIN)
+- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
+- else if (rc == 0)
+- ars_complete(acpi_desc, nfit_spa);
++ clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
++ rc = acpi_nfit_query_poison(acpi_desc);
++ if (rc)
++ break;
++ acpi_desc->scrub_spa = nfit_spa;
++ ars_complete(acpi_desc, nfit_spa);
++ /*
++ * If ars_complete() says we didn't complete the
++ * short scrub, we'll try again with a long
++ * request.
++ */
++ acpi_desc->scrub_spa = NULL;
+ break;
+ case -EBUSY:
++ case -ENOMEM:
+ case -ENOSPC:
++ /*
++ * BIOS was using ARS, wait for it to complete (or
++ * resources to become available) and then perform our
++ * own scrubs.
++ */
+ break;
+ default:
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
+ break;
+ }
+
+- if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
+- set_bit(ARS_REQ, &nfit_spa->ars_state);
+-
+ return acpi_nfit_register_region(acpi_desc, nfit_spa);
+ }
+
+@@ -2916,6 +2920,8 @@ static unsigned int __acpi_nfit_scrub(st
+ struct device *dev = acpi_desc->dev;
+ struct nfit_spa *nfit_spa;
+
++ lockdep_assert_held(&acpi_desc->init_mutex);
++
+ if (acpi_desc->cancel)
+ return 0;
+
+@@ -2939,21 +2945,49 @@ static unsigned int __acpi_nfit_scrub(st
+
+ ars_complete_all(acpi_desc);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
++ enum nfit_ars_state req_type;
++ int rc;
++
+ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ continue;
+- if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
+- int rc = ars_start(acpi_desc, nfit_spa);
+
+- clear_bit(ARS_DONE, &nfit_spa->ars_state);
+- dev = nd_region_dev(nfit_spa->nd_region);
+- dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
+- nfit_spa->spa->range_index, rc);
+- if (rc == 0 || rc == -EBUSY)
+- return 1;
+- dev_err(dev, "ARS: range %d ARS failed (%d)\n",
+- nfit_spa->spa->range_index, rc);
+- set_bit(ARS_FAILED, &nfit_spa->ars_state);
++ /* prefer short ARS requests first */
++ if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
++ req_type = ARS_REQ_SHORT;
++ else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
++ req_type = ARS_REQ_LONG;
++ else
++ continue;
++ rc = ars_start(acpi_desc, nfit_spa, req_type);
++
++ dev = nd_region_dev(nfit_spa->nd_region);
++ dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
++ nfit_spa->spa->range_index,
++ req_type == ARS_REQ_SHORT ? "short" : "long",
++ rc);
++ /*
++ * Hmm, we raced someone else starting ARS? Try again in
++ * a bit.
++ */
++ if (rc == -EBUSY)
++ return 1;
++ if (rc == 0) {
++ dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
++ "scrub start while range %d active\n",
++ acpi_desc->scrub_spa->spa->range_index);
++ clear_bit(req_type, &nfit_spa->ars_state);
++ acpi_desc->scrub_spa = nfit_spa;
++ /*
++ * Consider this spa last for future scrub
++ * requests
++ */
++ list_move_tail(&nfit_spa->list, &acpi_desc->spas);
++ return 1;
+ }
++
++ dev_err(dev, "ARS: range %d ARS failed (%d)\n",
++ nfit_spa->spa->range_index, rc);
++ set_bit(ARS_FAILED, &nfit_spa->ars_state);
+ }
+ return 0;
+ }
+@@ -3009,6 +3043,7 @@ static void acpi_nfit_init_ars(struct ac
+ struct nd_cmd_ars_cap ars_cap;
+ int rc;
+
++ set_bit(ARS_FAILED, &nfit_spa->ars_state);
+ memset(&ars_cap, 0, sizeof(ars_cap));
+ rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
+ if (rc < 0)
+@@ -3025,16 +3060,14 @@ static void acpi_nfit_init_ars(struct ac
+ nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
+ acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
+ clear_bit(ARS_FAILED, &nfit_spa->ars_state);
+- set_bit(ARS_REQ, &nfit_spa->ars_state);
+ }
+
+ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+ {
+ struct nfit_spa *nfit_spa;
+- int rc, query_rc;
++ int rc;
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+- set_bit(ARS_FAILED, &nfit_spa->ars_state);
+ switch (nfit_spa_type(nfit_spa->spa)) {
+ case NFIT_SPA_VOLATILE:
+ case NFIT_SPA_PM:
+@@ -3043,20 +3076,12 @@ static int acpi_nfit_register_regions(st
+ }
+ }
+
+- /*
+- * Reap any results that might be pending before starting new
+- * short requests.
+- */
+- query_rc = acpi_nfit_query_poison(acpi_desc);
+- if (query_rc == 0)
+- ars_complete_all(acpi_desc);
+-
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ switch (nfit_spa_type(nfit_spa->spa)) {
+ case NFIT_SPA_VOLATILE:
+ case NFIT_SPA_PM:
+ /* register regions and kick off initial ARS run */
+- rc = ars_register(acpi_desc, nfit_spa, &query_rc);
++ rc = ars_register(acpi_desc, nfit_spa);
+ if (rc)
+ return rc;
+ break;
+@@ -3251,7 +3276,8 @@ static int acpi_nfit_clear_to_send(struc
+ return 0;
+ }
+
+-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
++int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
++ enum nfit_ars_state req_type)
+ {
+ struct device *dev = acpi_desc->dev;
+ int scheduled = 0, busy = 0;
+@@ -3271,14 +3297,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfi
+ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ continue;
+
+- if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
++ if (test_and_set_bit(req_type, &nfit_spa->ars_state))
+ busy++;
+- set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
+- } else {
+- if (test_bit(ARS_SHORT, &flags))
+- set_bit(ARS_SHORT, &nfit_spa->ars_state);
++ else
+ scheduled++;
+- }
+ }
+ if (scheduled) {
+ sched_ars(acpi_desc);
+@@ -3464,10 +3486,11 @@ static void acpi_nfit_update_notify(stru
+ static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
+ {
+ struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
+- unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
+- 0 : 1 << ARS_SHORT;
+
+- acpi_nfit_ars_rescan(acpi_desc, flags);
++ if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
++ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
++ else
++ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
+ }
+
+ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -118,10 +118,8 @@ enum nfit_dimm_notifiers {
+ };
+
+ enum nfit_ars_state {
+- ARS_REQ,
+- ARS_REQ_REDO,
+- ARS_DONE,
+- ARS_SHORT,
++ ARS_REQ_SHORT,
++ ARS_REQ_LONG,
+ ARS_FAILED,
+ };
+
+@@ -198,6 +196,7 @@ struct acpi_nfit_desc {
+ struct device *dev;
+ u8 ars_start_flags;
+ struct nd_cmd_ars_status *ars_status;
++ struct nfit_spa *scrub_spa;
+ struct delayed_work dwork;
+ struct list_head list;
+ struct kernfs_node *scrub_count_state;
+@@ -252,7 +251,8 @@ struct nfit_blk {
+
+ extern struct list_head acpi_descs;
+ extern struct mutex acpi_desc_lock;
+-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
++int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
++ enum nfit_ars_state req_type);
+
+ #ifdef CONFIG_X86_MCE
+ void nfit_mce_register(void);
--- /dev/null
+From 83b2348e2755db48fa8f40fdb791f366fabc0ba0 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 17 Oct 2018 13:24:56 -0700
+Subject: ACPI / OSL: Use 'jiffies' as the time basis for acpi_os_get_timer()
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 83b2348e2755db48fa8f40fdb791f366fabc0ba0 upstream.
+
+Since acpi_os_get_timer() may be called after the timer subsystem has
+been suspended, use the jiffies counter instead of ktime_get(). This
+patch avoids the following warning being reported during hibernation:
+
+WARNING: CPU: 0 PID: 612 at kernel/time/timekeeping.c:751 ktime_get+0x116/0x120
+RIP: 0010:ktime_get+0x116/0x120
+Call Trace:
+ acpi_os_get_timer+0xe/0x30
+ acpi_ds_exec_begin_control_op+0x175/0x1de
+ acpi_ds_exec_begin_op+0x2c7/0x39a
+ acpi_ps_create_op+0x573/0x5e4
+ acpi_ps_parse_loop+0x349/0x1220
+ acpi_ps_parse_aml+0x25b/0x6da
+ acpi_ps_execute_method+0x327/0x41b
+ acpi_ns_evaluate+0x4e9/0x6f5
+ acpi_ut_evaluate_object+0xd9/0x2f2
+ acpi_rs_get_method_data+0x8f/0x114
+ acpi_walk_resources+0x122/0x1b6
+ acpi_pci_link_get_current.isra.2+0x157/0x280
+ acpi_pci_link_set+0x32f/0x4a0
+ irqrouter_resume+0x58/0x80
+ syscore_resume+0x84/0x380
+ hibernation_snapshot+0x20c/0x4f0
+ hibernate+0x22d/0x3a6
+ state_store+0x99/0xa0
+ kobj_attr_store+0x37/0x50
+ sysfs_kf_write+0x87/0xa0
+ kernfs_fop_write+0x1a5/0x240
+ __vfs_write+0xd2/0x410
+ vfs_write+0x101/0x250
+ ksys_write+0xab/0x120
+ __x64_sys_write+0x43/0x50
+ do_syscall_64+0x71/0x220
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
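+Note that this trades resolution for robustness: the value returned is
+still in 100ns units, but it only advances once per jiffy. As a rough
+worked example (assuming HZ=250, which is not a value taken from this
+patch):
+
+    ACPI_100NSEC_PER_SEC / HZ = 10000000 / 250 = 40000
+
+i.e. each jiffies tick advances the AML Timer value by 40000 units of
+100ns (4 ms).
+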
+Fixes: 164a08cee135 (ACPICA: Dispatcher: Introduce timeout mechanism for infinite loop detection)
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+References: https://lists.01.org/pipermail/lkp/2018-April/008406.html
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Cc: 4.16+ <stable@vger.kernel.org> # 4.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/osl.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -617,15 +617,18 @@ void acpi_os_stall(u32 us)
+ }
+
+ /*
+- * Support ACPI 3.0 AML Timer operand
+- * Returns 64-bit free-running, monotonically increasing timer
+- * with 100ns granularity
++ * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
++ * monotonically increasing timer with 100ns granularity. Do not use
++ * ktime_get() to implement this function because this function may get
++ * called after timekeeping has been suspended. Note: calling this function
++ * after timekeeping has been suspended may lead to unexpected results
++ * because when timekeeping is suspended the jiffies counter is not
++ * incremented. See also timekeeping_suspend().
+ */
+ u64 acpi_os_get_timer(void)
+ {
+- u64 time_ns = ktime_to_ns(ktime_get());
+- do_div(time_ns, 100);
+- return time_ns;
++ return (get_jiffies_64() - INITIAL_JIFFIES) *
++ (ACPI_100NSEC_PER_SEC / HZ);
+ }
+
+ acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
--- /dev/null
+From 4abb951b73ff0a8a979113ef185651aa3c8da19b Mon Sep 17 00:00:00 2001
+From: Erik Schmauss <erik.schmauss@intel.com>
+Date: Wed, 17 Oct 2018 14:09:35 -0700
+Subject: ACPICA: AML interpreter: add region addresses in global list during initialization
+
+From: Erik Schmauss <erik.schmauss@intel.com>
+
+commit 4abb951b73ff0a8a979113ef185651aa3c8da19b upstream.
+
+The table load process omitted adding the operation region address
+range to the global list. This omission is problematic because the OS
+queries the global list to check for address range conflicts before
+deciding which drivers to load. This commit may result in warning
+messages that look like the following:
+
+[ 7.871761] ACPI Warning: system_IO range 0x00000428-0x0000042F conflicts with op_region 0x00000400-0x0000047F (\PMIO) (20180531/utaddress-213)
+[ 7.871769] ACPI: If an ACPI driver is available for this device, you should use it instead of the native driver
+
+However, these messages do not signify regressions. They are a result of
+properly adding address ranges to the global address list.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=200011
+Tested-by: Jean-Marc Lenoir <archlinux@jihemel.com>
+Signed-off-by: Erik Schmauss <erik.schmauss@intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpica/dsopcode.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
+ obj_desc->region.length));
+
++ status = acpi_ut_add_address_range(obj_desc->region.space_id,
++ obj_desc->region.address,
++ obj_desc->region.length, node);
++
+ /* Now the address and length are valid for this opregion */
+
+ obj_desc->region.flags |= AOPOBJ_DATA_VALID;
--- /dev/null
+From c64baa3a6fa207d112706bc5e7fd645cd8a8663f Mon Sep 17 00:00:00 2001
+From: Erik Schmauss <erik.schmauss@intel.com>
+Date: Wed, 17 Oct 2018 14:20:51 -0700
+Subject: ACPICA: AML Parser: fix parse loop to correctly skip erroneous extended opcodes
+
+From: Erik Schmauss <erik.schmauss@intel.com>
+
+commit c64baa3a6fa207d112706bc5e7fd645cd8a8663f upstream.
+
+AML opcodes come in two lengths: 1-byte opcodes and 2-byte, extended opcodes.
+If an error occurs due to illegal opcodes during table load, the AML parser
+needs to continue loading the table. In order to do this, it needs to skip
+parsing of the offending opcode and operands associated with that opcode.
+
+This change fixes the AML parse loop to correctly skip parsing of incorrect
+extended opcodes. Previously, only the short opcodes were skipped correctly.
+
+Signed-off-by: Erik Schmauss <erik.schmauss@intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpica/psloop.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/acpica/psloop.c
++++ b/drivers/acpi/acpica/psloop.c
+@@ -417,6 +417,7 @@ acpi_status acpi_ps_parse_loop(struct ac
+ union acpi_parse_object *op = NULL; /* current op */
+ struct acpi_parse_state *parser_state;
+ u8 *aml_op_start = NULL;
++ u8 opcode_length;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
+
+@@ -540,8 +541,19 @@ acpi_status acpi_ps_parse_loop(struct ac
+ "Skip parsing opcode %s",
+ acpi_ps_get_opcode_name
+ (walk_state->opcode)));
++
++ /*
++ * Determine the opcode length before skipping the opcode.
++ * An opcode can be 1 byte or 2 bytes in length.
++ */
++ opcode_length = 1;
++ if ((walk_state->opcode & 0xFF00) ==
++ AML_EXTENDED_OPCODE) {
++ opcode_length = 2;
++ }
+ walk_state->parser_state.aml =
+- walk_state->aml + 1;
++ walk_state->aml + opcode_length;
++
+ walk_state->parser_state.aml =
+ acpi_ps_get_next_package_end
+ (&walk_state->parser_state);
--- /dev/null
+From 2e17a262a2371d38d2ec03614a2675a32cef9912 Mon Sep 17 00:00:00 2001
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+Date: Mon, 8 Oct 2018 20:41:12 +0800
+Subject: bcache: correct dirty data statistics
+
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+
+commit 2e17a262a2371d38d2ec03614a2675a32cef9912 upstream.
+
+When a bcache device is clean, dirty keys may still exist after
+journal replay, so we need to count these dirty keys even when the
+device is in a clean state; otherwise, after writeback the amount
+of dirty data would be incorrect.
+
+Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/super.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1148,11 +1148,12 @@ int bch_cached_dev_attach(struct cached_
+ }
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+- bch_sectors_dirty_init(&dc->disk);
+ atomic_set(&dc->has_dirty, 1);
+ bch_writeback_queue(dc);
+ }
+
++ bch_sectors_dirty_init(&dc->disk);
++
+ bch_cached_dev_run(dc);
+ bcache_device_link(&dc->disk, c, "bdev");
+ atomic_inc(&c->attached_dev_nr);
--- /dev/null
+From dd0c91793b7c2658ea32c6b3a2247a8ceca45dc0 Mon Sep 17 00:00:00 2001
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+Date: Mon, 8 Oct 2018 20:41:10 +0800
+Subject: bcache: fix ioctl in flash device
+
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+
+commit dd0c91793b7c2658ea32c6b3a2247a8ceca45dc0 upstream.
+
+An ioctl on a flash-only device ends up in ioctl_dev() in super.c,
+which should not look up a cached device there, since a flash-only
+device has no backing device. This patch moves the dc->io_disable
+check into cached_dev_ioctl() so that ioctls on flash-only devices
+work correctly.
+
+Fixes: 0f0709e6bfc3c ("bcache: stop bcache device when backing device is offline")
+Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/request.c | 3 +++
+ drivers/md/bcache/super.c | 4 ----
+ 2 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -1218,6 +1218,9 @@ static int cached_dev_ioctl(struct bcach
+ {
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
++ if (dc->io_disable)
++ return -EIO;
++
+ return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
+ }
+
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -643,10 +643,6 @@ static int ioctl_dev(struct block_device
+ unsigned int cmd, unsigned long arg)
+ {
+ struct bcache_device *d = b->bd_disk->private_data;
+- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-
+- if (dc->io_disable)
+- return -EIO;
+
+ return d->ioctl(d, mode, cmd, arg);
+ }
--- /dev/null
+From 2d6cb6edd2c7fb4f40998895bda45006281b1ac5 Mon Sep 17 00:00:00 2001
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+Date: Mon, 8 Oct 2018 20:41:14 +0800
+Subject: bcache: fix miss key refill->end in writeback
+
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+
+commit 2d6cb6edd2c7fb4f40998895bda45006281b1ac5 upstream.
+
+refill->end records the last key of writeback. For example, on the
+first pass, keys (1,128K) to (1,1024K) are flushed to the backing
+device, but the end key (1,1024K) is not included, because of the
+code below:
+ if (bkey_cmp(k, refill->end) >= 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
+The next time the writeback keybuf is refilled, the search starts from
+(1,1024K) and returns a key bigger than it, so the key (1,1024K) is
+missed.
+This patch modifies the comparison above so that the end key is
+included in the writeback key buffer.
+
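+A minimal sketch of the intended boundary check (mirroring the one-line
+change below, not new logic): keys equal to refill->end are now
+accepted, and only keys strictly beyond it terminate the map:
+
+    if (bkey_cmp(k, refill->end) > 0) {     /* strictly past the end */
+        ret = MAP_DONE;
+        goto out;
+    }
+    /* k <= refill->end: the key is added to the writeback keybuf */
+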
+Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/btree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree
+ struct keybuf *buf = refill->buf;
+ int ret = MAP_CONTINUE;
+
+- if (bkey_cmp(k, refill->end) >= 0) {
++ if (bkey_cmp(k, refill->end) > 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
--- /dev/null
+From 502b291568fc7faf1ebdb2c2590f12851db0ff76 Mon Sep 17 00:00:00 2001
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+Date: Mon, 8 Oct 2018 20:41:08 +0800
+Subject: bcache: trace missed reading by cache_missed
+
+From: Tang Junhui <tang.junhui.linux@gmail.com>
+
+commit 502b291568fc7faf1ebdb2c2590f12851db0ff76 upstream.
+
+Missed read IOs are identified by s->cache_missed, not s->cache_miss,
+so use s->cache_missed in trace_bcache_read() to indicate whether the
+IO missed the cache or not.
+
+Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bcache/request.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -850,7 +850,7 @@ static void cached_dev_read_done_bh(stru
+
+ bch_mark_cache_accounting(s->iop.c, s->d,
+ !s->cache_missed, s->iop.bypass);
+- trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
++ trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
+
+ if (s->iop.status)
+ continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
--- /dev/null
+From 1adfc5e4136f5967d591c399aff95b3b035f16b7 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 29 Oct 2018 20:57:17 +0800
+Subject: block: make sure discard bio is aligned with logical block size
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 1adfc5e4136f5967d591c399aff95b3b035f16b7 upstream.
+
+Obviously the created discard bio has to be aligned with logical block size.
+
+This patch introduces the bio_allowed_max_sectors() helper for
+this purpose.
+
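+The helper simply rounds the maximum bio size (UINT_MAX bytes) down to
+the logical block size before converting it to 512-byte sectors. As a
+rough worked example (assuming a 4096-byte logical block size, which is
+not a value taken from this patch):
+
+    round_down(UINT_MAX, 4096) >> 9 = 4294963200 >> 9 = 8388600
+
+8388600 is a multiple of 8 sectors (4096 bytes), so a discard bio
+capped to it stays aligned to the logical block size.
+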
+Cc: stable@vger.kernel.org
+Cc: Mike Snitzer <snitzer@redhat.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Xiao Ni <xni@redhat.com>
+Cc: Mariusz Dabrowski <mariusz.dabrowski@intel.com>
+Fixes: 744889b7cbb56a6 ("block: don't deal with discard limit in blkdev_issue_discard()")
+Fixes: a22c4d7e34402cc ("block: re-add discard_granularity and alignment checks")
+Reported-by: Rui Salvaterra <rsalvaterra@gmail.com>
+Tested-by: Rui Salvaterra <rsalvaterra@gmail.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-lib.c | 3 +--
+ block/blk-merge.c | 3 ++-
+ block/blk.h | 10 ++++++++++
+ 3 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -58,8 +58,7 @@ int __blkdev_issue_discard(struct block_
+
+ if (!req_sects)
+ goto fail;
+- if (req_sects > UINT_MAX >> 9)
+- req_sects = UINT_MAX >> 9;
++ req_sects = min(req_sects, bio_allowed_max_sectors(q));
+
+ end_sect = sector + req_sects;
+
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -27,7 +27,8 @@ static struct bio *blk_bio_discard_split
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+- max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
++ max_discard_sectors = min(q->limits.max_discard_sectors,
++ bio_allowed_max_sectors(q));
+ max_discard_sectors -= max_discard_sectors % granularity;
+
+ if (unlikely(!max_discard_sectors)) {
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -329,6 +329,16 @@ static inline unsigned long blk_rq_deadl
+ }
+
+ /*
++ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
++ * is defined as 'unsigned int'. It also has to be aligned to the logical
++ * block size, which is the minimum unit accepted by the hardware.
++ */
++static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
++{
++ return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
++}
++
++/*
+ * Internal io_context interface
+ */
+ void get_io_context(struct io_context *ioc);
--- /dev/null
+From 34ffec60b27aa81d04e274e71e4c6ef740f75fc7 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 29 Oct 2018 20:57:19 +0800
+Subject: block: make sure writesame bio is aligned with logical block size
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 34ffec60b27aa81d04e274e71e4c6ef740f75fc7 upstream.
+
+Obviously the created writesame bio has to be aligned with the logical
+block size; use bio_allowed_max_sectors() to retrieve this limit.
+
+Cc: stable@vger.kernel.org
+Cc: Mike Snitzer <snitzer@redhat.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Xiao Ni <xni@redhat.com>
+Cc: Mariusz Dabrowski <mariusz.dabrowski@intel.com>
+Fixes: b49a0871be31a745b2ef ("block: remove split code in blkdev_issue_{discard,write_same}")
+Tested-by: Rui Salvaterra <rsalvaterra@gmail.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-lib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -161,7 +161,7 @@ static int __blkdev_issue_write_same(str
+ return -EOPNOTSUPP;
+
+ /* Ensure that max_write_same_sectors doesn't overflow bi_size */
+- max_write_same_sectors = UINT_MAX >> 9;
++ max_write_same_sectors = bio_allowed_max_sectors(q);
+
+ while (nr_sects) {
+ bio = next_bio(bio, 1, gfp_mask);
--- /dev/null
+From 52990a5fb0c991ecafebdab43138b5ed41376852 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 21 Oct 2018 12:02:36 -0600
+Subject: block: setup bounce bio_sets properly
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 52990a5fb0c991ecafebdab43138b5ed41376852 upstream.
+
+We're only setting up the bounce bio sets if we happen
+to need bouncing for regular HIGHMEM, not if we only need
+it for ISA devices.
+
+Protect the ISA bounce setup with a mutex, since it's
+being invoked from driver init functions and can thus be
+called in parallel.
+
+Cc: stable@vger.kernel.org
+Reported-by: Ondrej Zary <linux@rainbow-software.org>
+Tested-by: Ondrej Zary <linux@rainbow-software.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bounce.c | 37 ++++++++++++++++++++++++++++---------
+ 1 file changed, 28 insertions(+), 9 deletions(-)
+
+--- a/block/bounce.c
++++ b/block/bounce.c
+@@ -31,6 +31,24 @@
+ static struct bio_set bounce_bio_set, bounce_bio_split;
+ static mempool_t page_pool, isa_page_pool;
+
++static void init_bounce_bioset(void)
++{
++ static bool bounce_bs_setup;
++ int ret;
++
++ if (bounce_bs_setup)
++ return;
++
++ ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
++ BUG_ON(ret);
++ if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
++ BUG_ON(1);
++
++ ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
++ BUG_ON(ret);
++ bounce_bs_setup = true;
++}
++
+ #if defined(CONFIG_HIGHMEM)
+ static __init int init_emergency_pool(void)
+ {
+@@ -44,14 +62,7 @@ static __init int init_emergency_pool(vo
+ BUG_ON(ret);
+ pr_info("pool size: %d pages\n", POOL_SIZE);
+
+- ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+- BUG_ON(ret);
+- if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
+- BUG_ON(1);
+-
+- ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
+- BUG_ON(ret);
+-
++ init_bounce_bioset();
+ return 0;
+ }
+
+@@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp
+ return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
+ }
+
++static DEFINE_MUTEX(isa_mutex);
++
+ /*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+@@ -94,14 +107,20 @@ int init_emergency_isa_pool(void)
+ {
+ int ret;
+
+- if (mempool_initialized(&isa_page_pool))
++ mutex_lock(&isa_mutex);
++
++ if (mempool_initialized(&isa_page_pool)) {
++ mutex_unlock(&isa_mutex);
+ return 0;
++ }
+
+ ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
+ mempool_free_pages, (void *) 0);
+ BUG_ON(ret);
+
+ pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
++ init_bounce_bioset();
++ mutex_unlock(&isa_mutex);
+ return 0;
+ }
+
--- /dev/null
+From da5e79bc70b84971d2b3a55fb252e34e51d81d48 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 15 Oct 2018 23:21:05 +0200
+Subject: cpufreq: conservative: Take limits changes into account properly
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit da5e79bc70b84971d2b3a55fb252e34e51d81d48 upstream.
+
+If the policy limits change between invocations of cs_dbs_update(),
+the requested frequency value stored in dbs_info may not be updated
+and the function may use a stale value of it next time. Moreover, if
+idle periods are taken into account by cs_dbs_update(), the requested
+frequency value stored in dbs_info may be below the min policy limit,
+which is incorrect.
+
+To fix these problems, always update the requested frequency value
+in dbs_info along with the local copy of it when the previous
+requested frequency is beyond the policy limits and avoid decreasing
+the requested frequency below the min policy limit when taking
+idle periods into account.
+
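+A minimal sketch of the resulting policy (mirroring the diff below, not
+new logic):
+
+    if (requested_freq > policy->max || requested_freq < policy->min) {
+        requested_freq = policy->cur;
+        dbs_info->requested_freq = requested_freq;
+    }
+
+    /* when accounting for idle periods, never drop below policy->min */
+    if (requested_freq > policy->min + freq_steps)
+        requested_freq -= freq_steps;
+    else
+        requested_freq = policy->min;
+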
+Fixes: abb6627910a1 (cpufreq: conservative: Fix next frequency selection)
+Fixes: 00bfe05889e9 (cpufreq: conservative: Decrease frequency faster for deferred updates)
+Reported-by: Waldemar Rymarkiewicz <waldemarx.rymarkiewicz@intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Waldemar Rymarkiewicz <waldemarx.rymarkiewicz@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq_conservative.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -80,8 +80,10 @@ static unsigned int cs_dbs_update(struct
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+- if (requested_freq > policy->max || requested_freq < policy->min)
++ if (requested_freq > policy->max || requested_freq < policy->min) {
+ requested_freq = policy->cur;
++ dbs_info->requested_freq = requested_freq;
++ }
+
+ freq_step = get_freq_step(cs_tuners, policy);
+
+@@ -92,7 +94,7 @@ static unsigned int cs_dbs_update(struct
+ if (policy_dbs->idle_periods < UINT_MAX) {
+ unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
+
+- if (requested_freq > freq_steps)
++ if (requested_freq > policy->min + freq_steps)
+ requested_freq -= freq_steps;
+ else
+ requested_freq = policy->min;
--- /dev/null
+From a3ceed87b07769fb80ce9dc6b604e515dba14c4b Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Mon, 17 Sep 2018 11:24:20 +0800
+Subject: dma-mapping: fix panic caused by passing empty cma command line argument
+
+From: He Zhe <zhe.he@windriver.com>
+
+commit a3ceed87b07769fb80ce9dc6b604e515dba14c4b upstream.
+
+early_cma does not check its input argument before passing it to
+simple_strtoull. The argument is a NULL pointer if "cma" is set on the
+command line without a value, which causes the following panic.
+
+PANIC: early exception 0xe3 IP 10:ffffffffa3e9db8d error 0 cr2 0x0
+[ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.19.0-rc3-yocto-standard+ #7
+[ 0.000000] RIP: 0010:_parse_integer_fixup_radix+0xd/0x70
+...
+[ 0.000000] Call Trace:
+[ 0.000000] simple_strtoull+0x29/0x70
+[ 0.000000] memparse+0x26/0x90
+[ 0.000000] early_cma+0x17/0x6a
+[ 0.000000] do_early_param+0x57/0x8e
+[ 0.000000] parse_args+0x208/0x320
+[ 0.000000] ? rdinit_setup+0x30/0x30
+[ 0.000000] parse_early_options+0x29/0x2d
+[ 0.000000] ? rdinit_setup+0x30/0x30
+[ 0.000000] parse_early_param+0x36/0x4d
+[ 0.000000] setup_arch+0x336/0x99e
+[ 0.000000] start_kernel+0x6f/0x4e6
+[ 0.000000] x86_64_start_reservations+0x24/0x26
+[ 0.000000] x86_64_start_kernel+0x6f/0x72
+[ 0.000000] secondary_startup_64+0xa4/0xb0
+
+This patch adds a check to prevent the panic.
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/dma/contiguous.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/dma/contiguous.c
++++ b/kernel/dma/contiguous.c
+@@ -49,7 +49,11 @@ static phys_addr_t limit_cmdline;
+
+ static int __init early_cma(char *p)
+ {
+- pr_debug("%s(%s)\n", __func__, p);
++ if (!p) {
++ pr_err("Config string not provided\n");
++ return -EINVAL;
++ }
++
+ size_cmdline = memparse(p, &p);
+ if (*p != '@')
+ return 0;
--- /dev/null
+From e7c6a55606b5c46b449d76588968b4d8caae903f Mon Sep 17 00:00:00 2001
+From: Dmitry Bazhenov <bazhenov.dn@gmail.com>
+Date: Mon, 15 Oct 2018 14:21:22 +0500
+Subject: hwmon: (pmbus) Fix page count auto-detection.
+
+From: Dmitry Bazhenov <bazhenov.dn@gmail.com>
+
+commit e7c6a55606b5c46b449d76588968b4d8caae903f upstream.
+
+Devices with a compatible="pmbus" field have an initial page count of
+zero, and pmbus_clear_faults() being called before the page count auto-
+detection does not actually clear faults because it depends on the
+page count. Uncleared faults may in turn cause the subsequent page
+count auto-detection to fail.
+
+This patch fixes this problem by calling pmbus_clear_fault_page()
+for the currently set page and calling pmbus_clear_faults() after the
+page count has been detected.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Bazhenov <bazhenov.dn@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/pmbus/pmbus.c | 2 ++
+ drivers/hwmon/pmbus/pmbus_core.c | 5 ++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/hwmon/pmbus/pmbus.c
++++ b/drivers/hwmon/pmbus/pmbus.c
+@@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_cli
+ } else {
+ info->pages = 1;
+ }
++
++ pmbus_clear_faults(client);
+ }
+
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -2015,7 +2015,10 @@ static int pmbus_init_common(struct i2c_
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+ client->flags |= I2C_CLIENT_PEC;
+
+- pmbus_clear_faults(client);
++ if (data->info->pages)
++ pmbus_clear_faults(client);
++ else
++ pmbus_clear_fault_page(client, -1);
+
+ if (info->identify) {
+ ret = (*info->identify)(client, info);
--- /dev/null
+From 0711e8c1b4572d076264e71b0002d223f2666ed7 Mon Sep 17 00:00:00 2001
+From: Jan Glauber <jglauber@cavium.com>
+Date: Thu, 11 Oct 2018 12:13:01 +0200
+Subject: ipmi: Fix timer race with module unload
+
+From: Jan Glauber <jglauber@cavium.com>
+
+commit 0711e8c1b4572d076264e71b0002d223f2666ed7 upstream.
+
+Please note that the oops below is from an older kernel, but the same
+race seems to be present in the upstream kernel too.
+
+---8<---
+
+The following panic was encountered while removing the ipmi_ssif
+module:
+
+[ 526.352555] Unable to handle kernel paging request at virtual address ffff000006923090
+[ 526.360464] Mem abort info:
+[ 526.363257] ESR = 0x86000007
+[ 526.366304] Exception class = IABT (current EL), IL = 32 bits
+[ 526.372221] SET = 0, FnV = 0
+[ 526.375269] EA = 0, S1PTW = 0
+[ 526.378405] swapper pgtable: 4k pages, 48-bit VAs, pgd = 000000008ae60416
+[ 526.385185] [ffff000006923090] *pgd=000000bffcffe803, *pud=000000bffcffd803, *pmd=0000009f4731a003, *pte=0000000000000000
+[ 526.396141] Internal error: Oops: 86000007 [#1] SMP
+[ 526.401008] Modules linked in: nls_iso8859_1 ipmi_devintf joydev input_leds ipmi_msghandler shpchp sch_fq_codel ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq libcrc32c raid1 raid0 multipath linear i2c_smbus hid_generic usbhid uas hid usb_storage ast aes_ce_blk i2c_algo_bit aes_ce_cipher qede ttm crc32_ce ptp crct10dif_ce drm_kms_helper ghash_ce syscopyarea sha2_ce sysfillrect sysimgblt pps_core fb_sys_fops sha256_arm64 sha1_ce mpt3sas qed drm raid_class ahci scsi_transport_sas libahci gpio_xlp i2c_xlp9xx aes_neon_bs aes_neon_blk crypto_simd cryptd aes_arm64 [last unloaded: ipmi_ssif]
+[ 526.468085] CPU: 125 PID: 0 Comm: swapper/125 Not tainted 4.15.0-35-generic #38~lp1775396+build.1
+[ 526.476942] Hardware name: To be filled by O.E.M. Saber/Saber, BIOS 0ACKL022 08/14/2018
+[ 526.484932] pstate: 00400009 (nzcv daif +PAN -UAO)
+[ 526.489713] pc : 0xffff000006923090
+[ 526.493198] lr : call_timer_fn+0x34/0x178
+[ 526.497194] sp : ffff000009b0bdd0
+[ 526.500496] x29: ffff000009b0bdd0 x28: 0000000000000082
+[ 526.505796] x27: 0000000000000002 x26: ffff000009515188
+[ 526.511096] x25: ffff000009515180 x24: ffff0000090f1018
+[ 526.516396] x23: ffff000009519660 x22: dead000000000200
+[ 526.521696] x21: ffff000006923090 x20: 0000000000000100
+[ 526.526995] x19: ffff809eeb466a40 x18: 0000000000000000
+[ 526.532295] x17: 000000000000000e x16: 0000000000000007
+[ 526.537594] x15: 0000000000000000 x14: 071c71c71c71c71c
+[ 526.542894] x13: 0000000000000000 x12: 0000000000000000
+[ 526.548193] x11: 0000000000000001 x10: ffff000009b0be88
+[ 526.553493] x9 : 0000000000000000 x8 : 0000000000000005
+[ 526.558793] x7 : ffff80befc1f8528 x6 : 0000000000000020
+[ 526.564092] x5 : 0000000000000040 x4 : 0000000020001b20
+[ 526.569392] x3 : 0000000000000000 x2 : ffff809eeb466a40
+[ 526.574692] x1 : ffff000006923090 x0 : ffff809eeb466a40
+[ 526.579992] Process swapper/125 (pid: 0, stack limit = 0x000000002eb50acc)
+[ 526.586854] Call trace:
+[ 526.589289] 0xffff000006923090
+[ 526.592419] expire_timers+0xc8/0x130
+[ 526.596070] run_timer_softirq+0xec/0x1b0
+[ 526.600070] __do_softirq+0x134/0x328
+[ 526.603726] irq_exit+0xc8/0xe0
+[ 526.606857] __handle_domain_irq+0x6c/0xc0
+[ 526.610941] gic_handle_irq+0x84/0x188
+[ 526.614679] el1_irq+0xe8/0x180
+[ 526.617822] cpuidle_enter_state+0xa0/0x328
+[ 526.621993] cpuidle_enter+0x34/0x48
+[ 526.625564] call_cpuidle+0x44/0x70
+[ 526.629040] do_idle+0x1b8/0x1f0
+[ 526.632256] cpu_startup_entry+0x2c/0x30
+[ 526.636174] secondary_start_kernel+0x11c/0x130
+[ 526.640694] Code: bad PC value
+[ 526.643800] ---[ end trace d020b0b8417c2498 ]---
+[ 526.648404] Kernel panic - not syncing: Fatal exception in interrupt
+[ 526.654778] SMP: stopping secondary CPUs
+[ 526.658734] Kernel Offset: disabled
+[ 526.662211] CPU features: 0x5800c38
+[ 526.665688] Memory Limit: none
+[ 526.668768] ---[ end Kernel panic - not syncing: Fatal exception in interrupt
+
+Prevent mod_timer from arming a timer that was already removed by
+del_timer during module unload.
+
+Signed-off-by: Jan Glauber <jglauber@cavium.com>
+Cc: <stable@vger.kernel.org> # 3.19
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_ssif.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -606,8 +606,9 @@ static void msg_done_handler(struct ssif
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+ ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+- mod_timer(&ssif_info->retry_timer,
+- jiffies + SSIF_MSG_JIFFIES);
++ if (!ssif_info->stopping)
++ mod_timer(&ssif_info->retry_timer,
++ jiffies + SSIF_MSG_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+@@ -939,8 +940,9 @@ static void msg_written_handler(struct s
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+- mod_timer(&ssif_info->retry_timer,
+- jiffies + SSIF_MSG_PART_JIFFIES);
++ if (!ssif_info->stopping)
++ mod_timer(&ssif_info->retry_timer,
++ jiffies + SSIF_MSG_PART_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+ }
--- /dev/null
+From 92e2921f7eee63450a5f953f4b15dc6210219430 Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Sat, 6 Oct 2018 17:09:35 +0800
+Subject: jffs2: free jffs2_sb_info through jffs2_kill_sb()
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit 92e2921f7eee63450a5f953f4b15dc6210219430 upstream.
+
+When an invalid mount option is passed to jffs2, jffs2_parse_options()
+will fail and jffs2_sb_info will be freed, but then jffs2_sb_info will
+be used (use-after-free) and freed (double-free) in jffs2_kill_sb().
+
+Fix it by removing the buggy invocation of kfree() when getting invalid
+mount options.
+
+Fixes: 92abc475d8de ("jffs2: implement mount option parsing and compression overriding")
+Cc: stable@kernel.org
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Reviewed-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jffs2/super.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super
+ sb->s_fs_info = c;
+
+ ret = jffs2_parse_options(c, data);
+- if (ret) {
+- kfree(c);
++ if (ret)
+ return -EINVAL;
+- }
+
+ /* Initialize JFFS2 superblock locks, the further initialization will
+ * be done later */
--- /dev/null
+From 2e62024c265aa69315ed02835623740030435380 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Sat, 20 Oct 2018 18:47:53 +0900
+Subject: kprobes/x86: Use preempt_enable() in optimized_callback()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 2e62024c265aa69315ed02835623740030435380 upstream.
+
+The following commit:
+
+ a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes")
+
+removed local_irq_save/restore() from optimized_callback(). As a
+result, the handler might be interrupted by the rescheduling interrupt
+and might be rescheduled - so we must not use the
+preempt_enable_no_resched() macro.
+
+Use preempt_enable() instead, to not lose preemption events.
+
+[ mingo: Improved the changelog. ]
+
+Reported-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dwmw@amazon.co.uk
+Fixes: a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes")
+Link: http://lkml.kernel.org/r/154002887331.7627.10194920925792947001.stgit@devbox
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/opt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -179,7 +179,7 @@ optimized_callback(struct optimized_kpro
+ opt_pre_handler(&op->kp, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+- preempt_enable_no_resched();
++ preempt_enable();
+ }
+ NOKPROBE_SYMBOL(optimized_callback);
+
--- /dev/null
+From afd0b1fb22269f48d68fdf269891c653818c8047 Mon Sep 17 00:00:00 2001
+From: David Arcari <darcari@redhat.com>
+Date: Mon, 27 Aug 2018 15:19:08 -0400
+Subject: mailbox: PCC: handle parse error
+
+From: David Arcari <darcari@redhat.com>
+
+commit afd0b1fb22269f48d68fdf269891c653818c8047 upstream.
+
+acpi_pcc_probe() calls acpi_table_parse_entries_array() but fails
+to check for an error return. This in turn can result in calling
+kcalloc() with a negative count as well as emitting the following
+misleading error message:
+
+[ 2.642015] Could not allocate space for PCC mbox channels
+
+Fixes: 8f8027c5f935 (mailbox: PCC: erroneous error message when parsing ACPI PCCT)
+Signed-off-by: David Arcari <darcari@redhat.com>
+Reviewed-by: Al Stone <ahs3@redhat.com>
+Cc: 4.18+ <stable@vger.kernel.org> # 4.18+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mailbox/pcc.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -461,8 +461,11 @@ static int __init acpi_pcc_probe(void)
+ count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
+ sizeof(struct acpi_table_pcct), proc,
+ ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
+- if (count == 0 || count > MAX_PCC_SUBSPACES) {
+- pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
++ if (count <= 0 || count > MAX_PCC_SUBSPACES) {
++ if (count < 0)
++ pr_warn("Error parsing PCC subspaces from PCCT\n");
++ else
++ pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
+ return -EINVAL;
+ }
+
--- /dev/null
+From 6c925b333368cda4e1b0513b07f72316c0e7edd7 Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
+Date: Thu, 4 Oct 2018 15:01:04 +0200
+Subject: mtd: maps: gpio-addr-flash: Fix ioremapped size
+
+From: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
+
+commit 6c925b333368cda4e1b0513b07f72316c0e7edd7 upstream.
+
+We should only iomap the area of the chip that is memory mapped.
+Otherwise we could be mapping areas beyond the memory space, or areas
+that belong to other devices.
+
+Signed-off-by: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
+Fixes: ebd71e3a4861 ("mtd: maps: gpio-addr-flash: fix warnings and make more portable")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/maps/gpio-addr-flash.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/maps/gpio-addr-flash.c
++++ b/drivers/mtd/maps/gpio-addr-flash.c
+@@ -238,7 +238,7 @@ static int gpio_flash_probe(struct platf
+ state->map.copy_to = gf_copy_to;
+ state->map.bankwidth = pdata->width;
+ state->map.size = state->win_size * (1 << state->gpio_count);
+- state->map.virt = ioremap_nocache(memory->start, state->map.size);
++ state->map.virt = ioremap_nocache(memory->start, state->win_size);
+ if (!state->map.virt)
+ return -ENOMEM;
+
--- /dev/null
+From 53c83b59759c1ee213f5ffa194909daee8902a28 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Wed, 3 Oct 2018 11:05:04 +0200
+Subject: mtd: rawnand: marvell: fix the IRQ handler complete() condition
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 53c83b59759c1ee213f5ffa194909daee8902a28 upstream.
+
+With the current implementation, the complete() in the IRQ handler is
+supposed to be called only if the register status has one or the other
+RDY bit set. Other events might trigger an interrupt as well if
+enabled, but should not end up in a complete() call.
+
+For this purpose, the code checked whether the other bits were set; in
+that case complete() was not called. This is wrong, as two events might
+happen in a very tight time frame and, if the NDSR status read reports
+two bits set (e.g. RDY(0) and RDDREQ) at the same time, complete() was
+not called.
+
+This logic would lead to timeouts in marvell_nfc_wait_op() and has
+been observed on PXA boards (NFCv1) in the Hamming write path.
+
+Fixes: 02f26ecf8c77 ("mtd: nand: add reworked Marvell NAND controller driver")
+Cc: stable@vger.kernel.org
+Reported-by: Daniel Mack <daniel@zonque.org>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Daniel Mack <daniel@zonque.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/raw/marvell_nand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -686,7 +686,7 @@ static irqreturn_t marvell_nfc_isr(int i
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+- if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
++ if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
--- /dev/null
+From 000412276370a9bcfec73b3752ceefd9a927f1db Mon Sep 17 00:00:00 2001
+From: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Date: Fri, 21 Sep 2018 11:32:53 +0200
+Subject: mtd: spi-nor: fsl-quadspi: Don't let -EINVAL on the bus
+
+From: Ahmad Fatoum <a.fatoum@pengutronix.de>
+
+commit 000412276370a9bcfec73b3752ceefd9a927f1db upstream.
+
+fsl_qspi_get_seqid() may return -EINVAL, but fsl_qspi_init_ahb_read()
+doesn't check for error codes with the result that -EINVAL could find
+itself signalled over the bus.
+
+In conjunction with the LS1046A SoC's A-009283 erratum
+("Illegal accesses to SPI flash memory can result in a system hang"),
+this illegal access to SPI flash memory results in a system hang
+if userspace attempts to read later on.
+
+Avoid this by always checking fsl_qspi_get_seqid()'s return value
+and bailing out on error.
+
+Fixes: e46ecda764dc ("mtd: spi-nor: Add Freescale QuadSPI driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/spi-nor/fsl-quadspi.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/spi-nor/fsl-quadspi.c
++++ b/drivers/mtd/spi-nor/fsl-quadspi.c
+@@ -544,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c
+
+ /* trigger the LUT now */
+ seqid = fsl_qspi_get_seqid(q, cmd);
++ if (seqid < 0)
++ return seqid;
++
+ qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
+ base + QUADSPI_IPCR);
+
+@@ -672,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct
+ * causes the controller to clear the buffer, and use the sequence pointed
+ * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
+ */
+-static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
++static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
+ {
+ void __iomem *base = q->iobase;
+ int seqid;
+@@ -697,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struc
+
+ /* Set the default lut sequence for AHB Read. */
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
++ if (seqid < 0)
++ return seqid;
++
+ qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
++
++ return 0;
+ }
+
+ /* This function was used to prepare and enable QSPI clock */
+@@ -806,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struc
+ fsl_qspi_init_lut(q);
+
+ /* Init for AHB read */
+- fsl_qspi_init_ahb_read(q);
+-
+- return 0;
++ return fsl_qspi_init_ahb_read(q);
+ }
+
+ static const struct of_device_id fsl_qspi_dt_ids[] = {
--- /dev/null
+From 41fe242979e463d6ad251077ded01b825a330b7e Mon Sep 17 00:00:00 2001
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+Date: Tue, 28 Aug 2018 22:32:57 +0800
+Subject: mtd: spi-nor: fsl-quadspi: fix read error for flash size larger than 16MB
+
+From: Liu Xiang <liu.xiang6@zte.com.cn>
+
+commit 41fe242979e463d6ad251077ded01b825a330b7e upstream.
+
+If the size of the SPI-NOR flash is larger than 16MB, the read_opcode
+is set to SPINOR_OP_READ_1_1_4_4B, and fsl_qspi_get_seqid() will
+return -EINVAL when cmd is SPINOR_OP_READ_1_1_4_4B. This causes
+the read operation to fail.
+
+Fixes: e46ecda764dc ("mtd: spi-nor: Add Freescale QuadSPI driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Liu Xiang <liu.xiang6@zte.com.cn>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/spi-nor/fsl-quadspi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/fsl-quadspi.c
++++ b/drivers/mtd/spi-nor/fsl-quadspi.c
+@@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl
+ {
+ switch (cmd) {
+ case SPINOR_OP_READ_1_1_4:
++ case SPINOR_OP_READ_1_1_4_4B:
+ return SEQID_READ;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
--- /dev/null
+From 42460c31ae96cbad5ae226ee6c10bd8d70d764ae Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Thu, 30 Aug 2018 11:42:57 +0300
+Subject: mtd: spi-nor: intel-spi: Add support for Intel Ice Lake SPI serial flash
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit 42460c31ae96cbad5ae226ee6c10bd8d70d764ae upstream.
+
+Intel Ice Lake exposes the SPI serial flash controller as a PCI device
+in the same way as Intel Denverton does. Add the Ice Lake SPI serial
+flash PCI ID to the driver's list of supported devices.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Acked-by: Marek Vasut <marek.vasut@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/spi-nor/intel-spi-pci.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/intel-spi-pci.c
++++ b/drivers/mtd/spi-nor/intel-spi-pci.c
+@@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct
+ static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
++ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { },
--- /dev/null
+From 95691e3eddc41da2d1cd3cca51fecdfb46bd85bc Mon Sep 17 00:00:00 2001
+From: "Maciej S. Szmigiero" <mail@maciej.szmigiero.name>
+Date: Sun, 9 Sep 2018 01:21:06 +0200
+Subject: pcmcia: Implement CLKRUN protocol disabling for Ricoh bridges
+
+From: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+
+commit 95691e3eddc41da2d1cd3cca51fecdfb46bd85bc upstream.
+
+Currently, "disable_clkrun" yenta_socket module parameter is only
+implemented for TI CardBus bridges.
+Add also an implementation for Ricoh bridges that have the necessary
+setting documented in publicly available datasheets.
+
+Tested on an RL5C476II with a Sunrich C-160 CardBus NIC that doesn't work
+correctly unless the CLKRUN protocol is disabled.
+
+Also make it clear in the parameter description that "disable_clkrun"
+only works on these two brands of CardBus bridges.
+
+Signed-off-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pcmcia/ricoh.h | 35 +++++++++++++++++++++++++++++++++++
+ drivers/pcmcia/yenta_socket.c | 3 ++-
+ 2 files changed, 37 insertions(+), 1 deletion(-)
+
+--- a/drivers/pcmcia/ricoh.h
++++ b/drivers/pcmcia/ricoh.h
+@@ -119,6 +119,10 @@
+ #define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */
+ #define RL5C4XX_ZV_ENABLE 0x08
+
++/* Misc Control 3 Register */
++#define RL5C4XX_MISC3 0x00A2 /* 16 bit */
++#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1)
++
+ #ifdef __YENTA_H
+
+ #define rl_misc(socket) ((socket)->private[0])
+@@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_so
+ }
+ }
+
++static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
++{
++ u16 misc3;
++
++ /*
++ * RL5C475II likely has this setting, too, however no datasheet
++ * is publicly available for this chip
++ */
++ if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
++ socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
++ return;
++
++ if (socket->dev->revision < 0x80)
++ return;
++
++ misc3 = config_readw(socket, RL5C4XX_MISC3);
++ if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
++ if (!quiet)
++ dev_dbg(&socket->dev->dev,
++ "CLKRUN feature already disabled\n");
++ } else if (disable_clkrun) {
++ if (!quiet)
++ dev_info(&socket->dev->dev,
++ "Disabling CLKRUN feature\n");
++ misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
++ config_writew(socket, RL5C4XX_MISC3, misc3);
++ }
++}
++
+ static void ricoh_save_state(struct yenta_socket *socket)
+ {
+ rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
+@@ -172,6 +205,7 @@ static void ricoh_restore_state(struct y
+ config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
+ config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
+ config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
++ ricoh_set_clkrun(socket, true);
+ }
+
+
+@@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_s
+ config_writew(socket, RL5C4XX_CONFIG, config);
+
+ ricoh_set_zv(socket);
++ ricoh_set_clkrun(socket, false);
+
+ return 0;
+ }
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -26,7 +26,8 @@
+
+ static bool disable_clkrun;
+ module_param(disable_clkrun, bool, 0444);
+-MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
++MODULE_PARM_DESC(disable_clkrun,
++ "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
+
+ static bool isa_probe = 1;
+ module_param(isa_probe, bool, 0444);
bpf-fix-partial-copy-of-map_ptr-when-dst-is-scalar.patch
mips-vdso-reduce-vdso_randomize_size-to-64mb-for-64b.patch
gpio-mxs-get-rid-of-external-api-call.patch
+mtd-rawnand-marvell-fix-the-irq-handler-complete-condition.patch
+mtd-maps-gpio-addr-flash-fix-ioremapped-size.patch
+mtd-spi-nor-fsl-quadspi-fix-read-error-for-flash-size-larger-than-16mb.patch
+mtd-spi-nor-intel-spi-add-support-for-intel-ice-lake-spi-serial-flash.patch
+mtd-spi-nor-fsl-quadspi-don-t-let-einval-on-the-bus.patch
+spi-spi-mem-adjust-op-len-based-on-message-transfer-size-limitations.patch
+spi-bcm-qspi-switch-back-to-reading-flash-using-smaller-chunks.patch
+spi-bcm-qspi-fix-calculation-of-address-length.patch
+bcache-trace-missed-reading-by-cache_missed.patch
+bcache-fix-ioctl-in-flash-device.patch
+bcache-correct-dirty-data-statistics.patch
+bcache-fix-miss-key-refill-end-in-writeback.patch
+hwmon-pmbus-fix-page-count-auto-detection.patch
+jffs2-free-jffs2_sb_info-through-jffs2_kill_sb.patch
+block-setup-bounce-bio_sets-properly.patch
+block-make-sure-discard-bio-is-aligned-with-logical-block-size.patch
+block-make-sure-writesame-bio-is-aligned-with-logical-block-size.patch
+cpufreq-conservative-take-limits-changes-into-account-properly.patch
+dma-mapping-fix-panic-caused-by-passing-empty-cma-command-line-argument.patch
+pcmcia-implement-clkrun-protocol-disabling-for-ricoh-bridges.patch
+acpi-osl-use-jiffies-as-the-time-bassis-for-acpi_os_get_timer.patch
+acpica-aml-interpreter-add-region-addresses-in-global-list-during-initialization.patch
+acpica-aml-parser-fix-parse-loop-to-correctly-skip-erroneous-extended-opcodes.patch
+acpi-nfit-fix-address-range-scrub-completion-tracking.patch
+kprobes-x86-use-preempt_enable-in-optimized_callback.patch
+ipmi-fix-timer-race-with-module-unload.patch
+mailbox-pcc-handle-parse-error.patch
--- /dev/null
+From 0976eda7915507fe94e07870c19d717c9994b57a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Thu, 11 Oct 2018 09:40:22 +0200
+Subject: spi: bcm-qspi: fix calculation of address length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+commit 0976eda7915507fe94e07870c19d717c9994b57a upstream.
+
+During implementation of the new API, bcm_qspi_bspi_set_flex_mode() was
+modified in a way that broke the address length calculation: an
+unnecessary multiplication by 8 was added, breaking flash reads.
+
+Fixes: 5f195ee7d830 ("spi: bcm-qspi: Implement the spi_mem interface")
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Reviewed-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-bcm-qspi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -355,7 +355,7 @@ static int bcm_qspi_bspi_set_flex_mode(s
+ int bpc = 0, bpp = 0;
+ u8 command = op->cmd.opcode;
+ int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
+- int addrlen = op->addr.nbytes * 8;
++ int addrlen = op->addr.nbytes;
+ int flex_mode = 1;
+
+ dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
--- /dev/null
+From 940ec770c295682993d1cccce3081fd7c74fece8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Thu, 11 Oct 2018 09:42:17 +0200
+Subject: spi: bcm-qspi: switch back to reading flash using smaller chunks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+commit 940ec770c295682993d1cccce3081fd7c74fece8 upstream.
+
+Fixing/optimizing bcm_qspi_bspi_read() performance introduced two
+changes:
+1) It added a loop to read all requested data using multiple BSPI ops.
+2) It bumped the max size of a single BSPI block request from 256 B to 512 B.
+
+The latter change resulted in occasional BSPI timeouts, causing a
+regression.
+
+For some unknown reason the hardware doesn't always handle reads as
+expected when 512 B chunks are used. In such cases BSPI may return the
+requested number of bytes minus the last 1 to 3. It provides the
+remaining bytes later, but doesn't raise an interrupt until another LR
+start.
+
+Switching back to 256 B reads fixes that problem and the regression.
+
+Fixes: 345309fa7c0c ("spi: bcm-qspi: Fix bcm_qspi_bspi_read() performance")
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-bcm-qspi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -89,7 +89,7 @@
+ #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
+ #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
+
+-#define BSPI_READ_LENGTH 512
++#define BSPI_READ_LENGTH 256
+
+ /* MSPI register offsets */
+ #define MSPI_SPCR0_LSB 0x000
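A minimal sketch (not from the upstream patch) of the chunked-read pattern
the driver relies on; example_bspi_block_read() is a hypothetical stand-in
for one BSPI block request, and lowering BSPI_READ_LENGTH only increases
the number of loop iterations rather than limiting the total read size:

	/* Sketch only: example_bspi_block_read() is hypothetical. */
	static int example_chunked_read(struct bcm_qspi *qspi, u64 addr,
					u8 *buf, size_t len)
	{
		size_t done = 0;

		while (done < len) {
			size_t chunk = min_t(size_t, len - done, BSPI_READ_LENGTH);
			int ret;

			ret = example_bspi_block_read(qspi, addr + done,
						      buf + done, chunk);
			if (ret)
				return ret;

			done += chunk;
		}

		return 0;
	}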
--- /dev/null
+From e757996cafbeb6b71234a17130674bcd8f44c59e Mon Sep 17 00:00:00 2001
+From: Chuanhua Han <chuanhua.han@nxp.com>
+Date: Thu, 30 Aug 2018 16:43:24 +0800
+Subject: spi: spi-mem: Adjust op len based on message/transfer size limitations
+
+From: Chuanhua Han <chuanhua.han@nxp.com>
+
+commit e757996cafbeb6b71234a17130674bcd8f44c59e upstream.
+
+We need this to adjust the length of the second transfer (called "data"
+in spi-mem) when it is too long to fit in a SPI message or SPI transfer.
+
+Fixes: c36ff266dc82 ("spi: Extend the core to ease integration of SPI memory controllers")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chuanhua Han <chuanhua.han@nxp.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-mem.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/spi/spi-mem.c
++++ b/drivers/spi/spi-mem.c
+@@ -346,10 +346,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name);
+ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+ {
+ struct spi_controller *ctlr = mem->spi->controller;
++ size_t len;
++
++ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
+ return ctlr->mem_ops->adjust_op_size(mem, op);
+
++ if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
++ if (len > spi_max_transfer_size(mem->spi))
++ return -EINVAL;
++
++ op->data.nbytes = min3((size_t)op->data.nbytes,
++ spi_max_transfer_size(mem->spi),
++ spi_max_message_size(mem->spi) -
++ len);
++ if (!op->data.nbytes)
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
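As a usage sketch (not from the upstream patch), a spi-mem user is expected
to call spi_mem_adjust_op_size() before every spi_mem_exec_op() and loop
until all data has been transferred; the example_read() name and the plain
0x03 read opcode below are illustrative assumptions only:

	#include <linux/spi/spi-mem.h>

	/* Sketch only: read 'len' bytes at 'addr', honoring controller limits. */
	static int example_read(struct spi_mem *mem, u32 addr, u8 *buf, size_t len)
	{
		size_t done = 0;

		while (done < len) {
			struct spi_mem_op op =
				SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					   SPI_MEM_OP_ADDR(3, addr + done, 1),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_DATA_IN(len - done,
							      buf + done, 1));
			int ret;

			/* May shrink op.data.nbytes to fit message/transfer limits. */
			ret = spi_mem_adjust_op_size(mem, &op);
			if (ret)
				return ret;

			ret = spi_mem_exec_op(mem, &op);
			if (ret)
				return ret;

			done += op.data.nbytes;
		}

		return 0;
	}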