git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 7 Feb 2026 15:31:17 +0000 (16:31 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 7 Feb 2026 15:31:17 +0000 (16:31 +0100)
added patches:
bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch
pci-err-ensure-error-recoverability-at-all-times.patch
sched-fair-have-sd_serialize-affect-newidle-balancing.patch
sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch
treewide-drop-pci_save_state-after-pci_restore_state.patch

queue-6.18/bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch [new file with mode: 0644]
queue-6.18/pci-err-ensure-error-recoverability-at-all-times.patch [new file with mode: 0644]
queue-6.18/sched-fair-have-sd_serialize-affect-newidle-balancing.patch [new file with mode: 0644]
queue-6.18/sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch [new file with mode: 0644]
queue-6.18/series
queue-6.18/treewide-drop-pci_save_state-after-pci_restore_state.patch [new file with mode: 0644]

diff --git a/queue-6.18/bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch b/queue-6.18/bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch
new file mode 100644 (file)
index 0000000..67dbc65
--- /dev/null
@@ -0,0 +1,52 @@
+From 6eaee77923ddf04beedb832c06f983679586361c Mon Sep 17 00:00:00 2001
+From: Daniele Palmas <dnlplm@gmail.com>
+Date: Wed, 15 Oct 2025 12:20:59 +0200
+Subject: bus: mhi: host: pci_generic: Add Telit FE990B40 modem support
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+commit 6eaee77923ddf04beedb832c06f983679586361c upstream.
+
+Add SDX72-based modem Telit FE990B40, reusing the FN920C04 configuration.
+
+01:00.0 Unassigned class [ff00]: Qualcomm Device 0309
+        Subsystem: Device 1c5d:2025
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251015102059.1781001-1-dnlplm@gmail.com
+Signed-off-by: Fabio Porcedda <fabio.porcedda@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/mhi/host/pci_generic.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -877,6 +877,16 @@ static const struct mhi_pci_dev_info mhi
+       .edl_trigger = true,
+ };
++static const struct mhi_pci_dev_info mhi_telit_fe990b40_info = {
++      .name = "telit-fe990b40",
++      .config = &modem_telit_fn920c04_config,
++      .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
++      .dma_data_width = 32,
++      .sideband_wake = false,
++      .mru_default = 32768,
++      .edl_trigger = true,
++};
++
+ static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
+       .name = "netprisma-lcur57",
+       .edl = "qcom/prog_firehose_sdx24.mbn",
+@@ -933,6 +943,9 @@ static const struct pci_device_id mhi_pc
+       /* Telit FN990B40 (sdx72) */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a),
+               .driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info },
++      /* Telit FE990B40 (sdx72) */
++      { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025),
++              .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info },
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+       /* QDU100, x100-DU */
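
For context on the new match entry: PCI_DEVICE_SUB() keys on the subsystem IDs reported by lspci above (1c5d:2025) in addition to the Qualcomm 0x0309 device ID, which is how the FE990B40 is distinguished from other SDX72 designs such as the FN990B40 (1c5d:201a). A minimal sketch of what the added table entry amounts to, assuming the stock PCI_DEVICE_SUB() expansion from include/linux/pci.h (struct name and comments are illustrative, not part of the patch):

        #include <linux/pci.h>

        /*
         * { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025),
         *         .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info },
         * expands to roughly the following initializer:
         */
        static const struct pci_device_id telit_fe990b40_id = {
                .vendor      = PCI_VENDOR_ID_QCOM,      /* 0x17cb */
                .device      = 0x0309,                  /* SDX72 family */
                .subvendor   = 0x1c5d,                  /* Telit */
                .subdevice   = 0x2025,                  /* FE990B40 */
                /* mhi_telit_fe990b40_info is the struct added by the hunk above */
                .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info,
        };

With .class and .class_mask left at zero, the "Unassigned class [ff00]" seen in the lspci output is not constrained by the match.
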
diff --git a/queue-6.18/pci-err-ensure-error-recoverability-at-all-times.patch b/queue-6.18/pci-err-ensure-error-recoverability-at-all-times.patch
new file mode 100644 (file)
index 0000000..a1640b5
--- /dev/null
@@ -0,0 +1,92 @@
+From a2f1e22390ac2ca7ac8d77aa0f78c068b6dd2208 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Wed, 19 Nov 2025 09:50:03 +0100
+Subject: PCI/ERR: Ensure error recoverability at all times
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit a2f1e22390ac2ca7ac8d77aa0f78c068b6dd2208 upstream.
+
+When the PCI core gained power management support in 2002, it introduced
+pci_save_state() and pci_restore_state() helpers to restore Config Space
+after a D3hot or D3cold transition, which implies a Soft or Fundamental
+Reset (PCIe r7.0 sec 5.8):
+
+  https://git.kernel.org/tglx/history/c/a5287abe398b
+
+In 2006, EEH and AER were introduced to recover from errors by performing
+a reset.  Because errors can occur at any time, drivers began calling
+pci_save_state() on probe to ensure recoverability.
+
+In 2009, recoverability was foiled by commit c82f63e411f1 ("PCI: check
+saved state before restore"):  It amended pci_restore_state() to bail out
+if the "state_saved" flag has been cleared.  The flag is cleared by
+pci_restore_state() itself, hence a saved state is now allowed to be
+restored only once and is then invalidated.  That doesn't seem to make
+sense because the saved state should be good enough to be reused.
+
+Soon after, drivers began to work around this behavior by calling
+pci_save_state() immediately after pci_restore_state(), see e.g. commit
+b94f2d775a71 ("igb: call pci_save_state after pci_restore_state").
+Hilariously, two drivers even set the "state_saved" flag to true before
+invoking pci_restore_state(), see ipr_reset_restore_cfg_space() and
+e1000_io_slot_reset().
+
+Despite these workarounds, recoverability at all times is not guaranteed:
+E.g. when a PCIe port goes through a runtime suspend and resume cycle,
+the "saved_state" flag is cleared by:
+
+  pci_pm_runtime_resume()
+    pci_pm_default_resume_early()
+      pci_restore_state()
+
+... and hence on a subsequent AER event, the port's Config Space cannot be
+restored.  Riana reports a recovery failure of a GPU-integrated PCIe
+switch and has root-caused it to the behavior of pci_restore_state().
+Another workaround would be necessary, namely calling pci_save_state() in
+pcie_port_device_runtime_resume().
+
+The motivation of commit c82f63e411f1 was to prevent restoring state if
+pci_save_state() hasn't been called before.  But that can be achieved by
+saving state already on device addition, after Config Space has been
+initialized.  A desirable side effect is that devices become recoverable
+even if no driver gets bound.  This renders the commit unnecessary, so
+revert it.
+
+Reported-by: Riana Tauro <riana.tauro@intel.com> # off-list
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Riana Tauro <riana.tauro@intel.com>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Link: https://patch.msgid.link/9e34ce61c5404e99ffdd29205122c6fb334b38aa.1763483367.git.lukas@wunner.de
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/bus.c |    3 +++
+ drivers/pci/pci.c |    3 ---
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -357,6 +357,9 @@ void pci_bus_add_device(struct pci_dev *
+       pci_proc_attach_device(dev);
+       pci_bridge_d3_update(dev);
++      /* Save config space for error recoverability */
++      pci_save_state(dev);
++
+       /*
+        * If the PCI device is associated with a pwrctrl device with a
+        * power supply, create a device link between the PCI device and
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1855,9 +1855,6 @@ static void pci_restore_rebar_state(stru
+  */
+ void pci_restore_state(struct pci_dev *dev)
+ {
+-      if (!dev->state_saved)
+-              return;
+-
+       pci_restore_pcie_state(dev);
+       pci_restore_pasid_state(dev);
+       pci_restore_pri_state(dev);
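
To make the workaround described in the changelog concrete, here is a minimal sketch of a slot_reset error handler written the way drivers did before this revert; the function name is hypothetical and the body mirrors the pattern that the treewide patch further below removes:

        #include <linux/pci.h>

        static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
        {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                /*
                 * Pre-revert, pci_restore_state() cleared state_saved, so the
                 * state had to be re-saved at once to keep the device
                 * recoverable across a later error or resume.
                 */
                pci_save_state(pdev);
                pci_wake_from_d3(pdev, false);

                return PCI_ERS_RESULT_RECOVERED;
        }

With the early return dropped from pci_restore_state() and config space saved once in pci_bus_add_device(), the extra pci_save_state() call becomes redundant, which is exactly what the treewide cleanup below drops.
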
diff --git a/queue-6.18/sched-fair-have-sd_serialize-affect-newidle-balancing.patch b/queue-6.18/sched-fair-have-sd_serialize-affect-newidle-balancing.patch
new file mode 100644 (file)
index 0000000..48b6b45
--- /dev/null
@@ -0,0 +1,39 @@
+From 522fb20fbdbe48ed98f587d628637ff38ececd2d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 17 Nov 2025 17:13:09 +0100
+Subject: sched/fair: Have SD_SERIALIZE affect newidle balancing
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 522fb20fbdbe48ed98f587d628637ff38ececd2d upstream.
+
+Also serialize the possibly much more frequent newidle balancing for
+the 'expensive' domains that have SD_SERIALIZE set.
+
+Initial benchmarking by K Prateek and Tim showed no negative effect.
+
+Split out from the larger patch moving sched_balance_running around
+for ease of bisect and such.
+
+Suggested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
+Seconded-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/df068896-82f9-458d-8fff-5a2f654e8ffd@amd.com
+Link: https://patch.msgid.link/6fed119b723c71552943bfe5798c93851b30a361.1762800251.git.tim.c.chen@linux.intel.com
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -11744,7 +11744,7 @@ redo:
+               goto out_balanced;
+       }
+-      if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
++      if (!need_unlock && (sd->flags & SD_SERIALIZE)) {
+               int zero = 0;
+               if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
+                       goto out_balanced;
diff --git a/queue-6.18/sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch b/queue-6.18/sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch
new file mode 100644 (file)
index 0000000..e1136f0
--- /dev/null
@@ -0,0 +1,177 @@
+From 3324b2180c17b21c31c16966cc85ca41a7c93703 Mon Sep 17 00:00:00 2001
+From: Tim Chen <tim.c.chen@linux.intel.com>
+Date: Mon, 10 Nov 2025 10:47:35 -0800
+Subject: sched/fair: Skip sched_balance_running cmpxchg when balance is not due
+
+From: Tim Chen <tim.c.chen@linux.intel.com>
+
+commit 3324b2180c17b21c31c16966cc85ca41a7c93703 upstream.
+
+The NUMA sched domain sets the SD_SERIALIZE flag by default, allowing
+only one NUMA load balancing operation to run system-wide at a time.
+
+Currently, each sched group leader directly under the NUMA domain attempts
+to acquire the global sched_balance_running flag via cmpxchg() before
+checking whether load balancing is due or whether it is the designated
+load balancer for that NUMA domain. On systems with a large number
+of cores, this causes significant cache contention on the shared
+sched_balance_running flag.
+
+This patch reduces unnecessary cmpxchg() operations by first checking,
+via should_we_balance(), that this CPU is the designated balancer for
+the NUMA domain and that the balance interval has expired, before
+trying to acquire sched_balance_running to load balance a NUMA
+domain.
+
+On a 2-socket Granite Rapids system with sub-NUMA clustering enabled,
+running an OLTP workload, 7.8% of total CPU cycles were spent in
+sched_balance_domains() contending on sched_balance_running before
+this change.
+
+         : 104              static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+         : 105              {
+         : 106              return arch_cmpxchg(&v->counter, old, new);
+    0.00 :   ffffffff81326e6c:       xor    %eax,%eax
+    0.00 :   ffffffff81326e6e:       mov    $0x1,%ecx
+    0.00 :   ffffffff81326e73:       lock cmpxchg %ecx,0x2394195(%rip)        # ffffffff836bb010 <sched_balance_running>
+         : 110              sched_balance_domains():
+         : 12234            if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
+   99.39 :   ffffffff81326e7b:       test   %eax,%eax
+    0.00 :   ffffffff81326e7d:       jne    ffffffff81326e99 <sched_balance_domains+0x209>
+         : 12238            if (time_after_eq(jiffies, sd->last_balance + interval)) {
+    0.00 :   ffffffff81326e7f:       mov    0x14e2b3a(%rip),%rax        # ffffffff828099c0 <jiffies_64>
+    0.00 :   ffffffff81326e86:       sub    0x48(%r14),%rax
+    0.00 :   ffffffff81326e8a:       cmp    %rdx,%rax
+
+After applying this fix, sched_balance_domains() is gone from the profile
+and there is a 5% throughput improvement.
+
+[peterz: made it so that redo retains the 'lock' and split out the
+         CPU_NEWLY_IDLE change to a separate patch]
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Chen Yu <yu.c.chen@intel.com>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
+Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Reviewed-by: Srikar Dronamraju <srikar@linux.ibm.com>
+Tested-by: Mohini Narkhede <mohini.narkhede@intel.com>
+Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
+Link: https://patch.msgid.link/6fed119b723c71552943bfe5798c93851b30a361.1762800251.git.tim.c.chen@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c |   54 ++++++++++++++++++++++++++--------------------------
+ 1 file changed, 28 insertions(+), 26 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -11693,6 +11693,21 @@ static void update_lb_imbalance_stat(str
+ }
+ /*
++ * This flag serializes load-balancing passes over large domains
++ * (above the NODE topology level) - only one load-balancing instance
++ * may run at a time, to reduce overhead on very large systems with
++ * lots of CPUs and large NUMA distances.
++ *
++ * - Note that load-balancing passes triggered while another one
++ *   is executing are skipped and not re-tried.
++ *
++ * - Also note that this does not serialize rebalance_domains()
++ *   execution, as non-SD_SERIALIZE domains will still be
++ *   load-balanced in parallel.
++ */
++static atomic_t sched_balance_running = ATOMIC_INIT(0);
++
++/*
+  * Check this_cpu to ensure it is balanced within domain. Attempt to move
+  * tasks if there is an imbalance.
+  */
+@@ -11717,6 +11732,7 @@ static int sched_balance_rq(int this_cpu
+               .fbq_type       = all,
+               .tasks          = LIST_HEAD_INIT(env.tasks),
+       };
++      bool need_unlock = false;
+       cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
+@@ -11728,6 +11744,14 @@ redo:
+               goto out_balanced;
+       }
++      if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
++              int zero = 0;
++              if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
++                      goto out_balanced;
++
++              need_unlock = true;
++      }
++
+       group = sched_balance_find_src_group(&env);
+       if (!group) {
+               schedstat_inc(sd->lb_nobusyg[idle]);
+@@ -11968,6 +11992,9 @@ out_one_pinned:
+           sd->balance_interval < sd->max_interval)
+               sd->balance_interval *= 2;
+ out:
++      if (need_unlock)
++              atomic_set_release(&sched_balance_running, 0);
++
+       return ld_moved;
+ }
+@@ -12093,21 +12120,6 @@ out_unlock:
+ }
+ /*
+- * This flag serializes load-balancing passes over large domains
+- * (above the NODE topology level) - only one load-balancing instance
+- * may run at a time, to reduce overhead on very large systems with
+- * lots of CPUs and large NUMA distances.
+- *
+- * - Note that load-balancing passes triggered while another one
+- *   is executing are skipped and not re-tried.
+- *
+- * - Also note that this does not serialize rebalance_domains()
+- *   execution, as non-SD_SERIALIZE domains will still be
+- *   load-balanced in parallel.
+- */
+-static atomic_t sched_balance_running = ATOMIC_INIT(0);
+-
+-/*
+  * Scale the max sched_balance_rq interval with the number of CPUs in the system.
+  * This trades load-balance latency on larger machines for less cross talk.
+  */
+@@ -12175,7 +12187,7 @@ static void sched_balance_domains(struct
+       /* Earliest time when we have to do rebalance again */
+       unsigned long next_balance = jiffies + 60*HZ;
+       int update_next_balance = 0;
+-      int need_serialize, need_decay = 0;
++      int need_decay = 0;
+       u64 max_cost = 0;
+       rcu_read_lock();
+@@ -12199,13 +12211,6 @@ static void sched_balance_domains(struct
+               }
+               interval = get_sd_balance_interval(sd, busy);
+-
+-              need_serialize = sd->flags & SD_SERIALIZE;
+-              if (need_serialize) {
+-                      if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
+-                              goto out;
+-              }
+-
+               if (time_after_eq(jiffies, sd->last_balance + interval)) {
+                       if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
+                               /*
+@@ -12219,9 +12224,6 @@ static void sched_balance_domains(struct
+                       sd->last_balance = jiffies;
+                       interval = get_sd_balance_interval(sd, busy);
+               }
+-              if (need_serialize)
+-                      atomic_set_release(&sched_balance_running, 0);
+-out:
+               if (time_after(next_balance, sd->last_balance + interval)) {
+                       next_balance = sd->last_balance + interval;
+                       update_next_balance = 1;
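
The serialization scheme the two sched/fair patches above converge on is a global try-lock that is attempted only after the cheap "is balancing due on this CPU?" checks and released with a store-release once the pass finishes. A minimal self-contained sketch of that idiom, using illustrative names rather than the kernel's:

        #include <linux/atomic.h>
        #include <linux/types.h>

        /* Only one "expensive" (SD_SERIALIZE) balancing pass may run at a time. */
        static atomic_t balance_running = ATOMIC_INIT(0);

        static int do_serialized_balance(bool balance_due)
        {
                int zero = 0;
                int moved = 0;

                /*
                 * Cheap checks first: only the designated CPU whose balance
                 * interval has expired ever touches the shared flag, so most
                 * CPUs never issue the contended lock cmpxchg at all.
                 */
                if (!balance_due)
                        return 0;

                /* Try-lock: if another pass is already running, skip it (no retry). */
                if (!atomic_try_cmpxchg_acquire(&balance_running, &zero, 1))
                        return 0;

                /* ... the actual load balancing work would run here ... */

                atomic_set_release(&balance_running, 0);
                return moved;
        }

The acquire/release pair orders the balancing work against the flag, mirroring the atomic_try_cmpxchg_acquire()/atomic_set_release() usage in sched_balance_rq() above.
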
diff --git a/queue-6.18/series b/queue-6.18/series
index fbcc953f1a851fecc87445f52806a9cd913d8592..c3af463a468131c527b9f0e57afb494db3d56242 100644 (file)
--- a/queue-6.18/series
@@ -33,3 +33,8 @@ kvm-selftests-add-u_fortify_source-to-avoid-some-unpredictable-test-failures.pat
 kvm-don-t-clobber-irqfd-routing-type-when-deassigning-irqfd.patch
 hwmon-gpio-fan-fix-set_rpm-return-value.patch
 hwmon-gpio-fan-allow-to-stop-fans-when-config_pm-is-disabled.patch
+pci-err-ensure-error-recoverability-at-all-times.patch
+treewide-drop-pci_save_state-after-pci_restore_state.patch
+bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch
+sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch
+sched-fair-have-sd_serialize-affect-newidle-balancing.patch
diff --git a/queue-6.18/treewide-drop-pci_save_state-after-pci_restore_state.patch b/queue-6.18/treewide-drop-pci_save_state-after-pci_restore_state.patch
new file mode 100644 (file)
index 0000000..ea85049
--- /dev/null
@@ -0,0 +1,432 @@
+From 383d89699c5028de510a6667f674ed38585f77fc Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Sun, 12 Oct 2025 15:25:02 +0200
+Subject: treewide: Drop pci_save_state() after pci_restore_state()
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 383d89699c5028de510a6667f674ed38585f77fc upstream.
+
+In 2009, commit c82f63e411f1 ("PCI: check saved state before restore")
+changed the behavior of pci_restore_state() such that it became necessary
+to call pci_save_state() afterwards, lest recovery from subsequent PCI
+errors fails.
+
+The commit has just been reverted and so all the pci_save_state() after
+pci_restore_state() calls that have accumulated in the tree are now
+superfluous.  Drop them.
+
+Two drivers chose a different approach to achieve the same result:
+drivers/scsi/ipr.c and drivers/net/ethernet/intel/e1000e/netdev.c set the
+pci_dev's "state_saved" flag to true before calling pci_restore_state().
+Drop this as well.
+
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>  # qat
+Link: https://patch.msgid.link/c2b28cc4defa1b743cf1dedee23c455be98b397a.1760274044.git.lukas@wunner.de
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/intel/qat/qat_common/adf_aer.c    |    2 --
+ drivers/dma/ioat/init.c                          |    1 -
+ drivers/net/ethernet/broadcom/bnx2.c             |    2 --
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |    1 -
+ drivers/net/ethernet/broadcom/tg3.c              |    1 -
+ drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c  |    1 -
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  |    2 --
+ drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c |    1 -
+ drivers/net/ethernet/intel/e1000e/netdev.c       |    1 -
+ drivers/net/ethernet/intel/fm10k/fm10k_pci.c     |    6 ------
+ drivers/net/ethernet/intel/i40e/i40e_main.c      |    1 -
+ drivers/net/ethernet/intel/ice/ice_main.c        |    2 --
+ drivers/net/ethernet/intel/igb/igb_main.c        |    2 --
+ drivers/net/ethernet/intel/igc/igc_main.c        |    2 --
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    |    1 -
+ drivers/net/ethernet/mellanox/mlx4/main.c        |    1 -
+ drivers/net/ethernet/mellanox/mlx5/core/main.c   |    1 -
+ drivers/net/ethernet/meta/fbnic/fbnic_pci.c      |    1 -
+ drivers/net/ethernet/microchip/lan743x_main.c    |    1 -
+ drivers/net/ethernet/myricom/myri10ge/myri10ge.c |    4 ----
+ drivers/net/ethernet/neterion/s2io.c             |    1 -
+ drivers/pci/pcie/portdrv.c                       |    1 -
+ drivers/scsi/bfa/bfad.c                          |    1 -
+ drivers/scsi/csiostor/csio_init.c                |    1 -
+ drivers/scsi/ipr.c                               |    1 -
+ drivers/scsi/lpfc/lpfc_init.c                    |    6 ------
+ drivers/scsi/qla2xxx/qla_os.c                    |    5 -----
+ drivers/scsi/qla4xxx/ql4_os.c                    |    5 -----
+ drivers/tty/serial/8250/8250_pci.c               |    1 -
+ drivers/tty/serial/jsm/jsm_driver.c              |    1 -
+ 30 files changed, 57 deletions(-)
+
+--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+@@ -103,7 +103,6 @@ void adf_dev_restore(struct adf_accel_de
+                        accel_dev->accel_id);
+               hw_device->reset_device(accel_dev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+       }
+ }
+@@ -202,7 +201,6 @@ static pci_ers_result_t adf_slot_reset(s
+       if (!pdev->is_busmaster)
+               pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       res = adf_dev_up(accel_dev, false);
+       if (res && res != -EALREADY)
+               return PCI_ERS_RESULT_DISCONNECT;
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -1286,7 +1286,6 @@ static pci_ers_result_t ioat_pcie_error_
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_wake_from_d3(pdev, false);
+       }
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -6444,7 +6444,6 @@ bnx2_reset_task(struct work_struct *work
+       if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+               /* in case PCI block has reset */
+               pci_restore_state(bp->pdev);
+-              pci_save_state(bp->pdev);
+       }
+       rc = bnx2_init_nic(bp, 1);
+       if (rc) {
+@@ -8718,7 +8717,6 @@ static pci_ers_result_t bnx2_io_slot_res
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               if (netif_running(dev))
+                       err = bnx2_init_nic(bp, 1);
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -14216,7 +14216,6 @@ static pci_ers_result_t bnx2x_io_slot_re
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (netif_running(dev))
+               bnx2x_set_power_state(bp, PCI_D0);
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18349,7 +18349,6 @@ static pci_ers_result_t tg3_io_slot_rese
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (!netdev || !netif_running(netdev)) {
+               rc = PCI_ERS_RESULT_RECOVERED;
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -2933,7 +2933,6 @@ static int t3_reenable_adapter(struct ad
+       }
+       pci_set_master(adapter->pdev);
+       pci_restore_state(adapter->pdev);
+-      pci_save_state(adapter->pdev);
+       /* Free sge resources */
+       t3_free_sge_resources(adapter);
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5456,7 +5456,6 @@ static pci_ers_result_t eeh_slot_reset(s
+       if (!adap) {
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               return PCI_ERS_RESULT_RECOVERED;
+       }
+@@ -5471,7 +5470,6 @@ static pci_ers_result_t eeh_slot_reset(s
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (t4_wait_dev_ready(adap->regs) < 0)
+               return PCI_ERS_RESULT_DISCONNECT;
+--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
++++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+@@ -160,7 +160,6 @@ static pci_ers_result_t hbg_pci_err_slot
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       hbg_err_reset(priv);
+       return PCI_ERS_RESULT_RECOVERED;
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -7195,7 +7195,6 @@ static pci_ers_result_t e1000_io_slot_re
+                       "Cannot re-enable PCI device after reset.\n");
+               result = PCI_ERS_RESULT_DISCONNECT;
+       } else {
+-              pdev->state_saved = true;
+               pci_restore_state(pdev);
+               pci_set_master(pdev);
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -2423,12 +2423,6 @@ static pci_ers_result_t fm10k_io_slot_re
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-
+-              /* After second error pci->state_saved is false, this
+-               * resets it so EEH doesn't break.
+-               */
+-              pci_save_state(pdev);
+-
+               pci_wake_from_d3(pdev, false);
+               result = PCI_ERS_RESULT_RECOVERED;
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16456,7 +16456,6 @@ static pci_ers_result_t i40e_pci_error_s
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_wake_from_d3(pdev, false);
+               reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5661,7 +5661,6 @@ static int ice_resume(struct device *dev
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
+@@ -5761,7 +5760,6 @@ static pci_ers_result_t ice_pci_err_slot
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_wake_from_d3(pdev, false);
+               /* Check for life */
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -9599,7 +9599,6 @@ static int __igb_resume(struct device *d
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
+@@ -9754,7 +9753,6 @@ static pci_ers_result_t igb_io_slot_rese
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_enable_wake(pdev, PCI_D3cold, 0);
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7530,7 +7530,6 @@ static int __igc_resume(struct device *d
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
+@@ -7667,7 +7666,6 @@ static pci_ers_result_t igc_io_slot_rese
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_enable_wake(pdev, PCI_D3cold, 0);
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -12292,7 +12292,6 @@ static pci_ers_result_t ixgbe_io_slot_re
+               adapter->hw.hw_addr = adapter->io_addr;
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+-              pci_save_state(pdev);
+               pci_wake_from_d3(pdev, false);
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -4366,7 +4366,6 @@ static pci_ers_result_t mlx4_pci_slot_re
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       return PCI_ERS_RESULT_RECOVERED;
+ }
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2100,7 +2100,6 @@ static pci_ers_result_t mlx5_pci_slot_re
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       err = wait_vital(pdev);
+       if (err) {
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+@@ -574,7 +574,6 @@ static pci_ers_result_t fbnic_err_slot_r
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       if (pci_enable_device_mem(pdev)) {
+               dev_err(&pdev->dev,
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3915,7 +3915,6 @@ static int lan743x_pm_resume(struct devi
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       /* Restore HW_CFG that was saved during pm suspend */
+       if (adapter->is_pci11x1x)
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3416,10 +3416,6 @@ static void myri10ge_watchdog(struct wor
+                * nic was resumed from power saving mode.
+                */
+               pci_restore_state(mgp->pdev);
+-
+-              /* save state again for accounting reasons */
+-              pci_save_state(mgp->pdev);
+-
+       } else {
+               /* if we get back -1's from our slot, perhaps somebody
+                * powered off our card.  Don't try to reset it in
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -3425,7 +3425,6 @@ static void s2io_reset(struct s2io_nic *
+               /* Restore the PCI state saved during initialization. */
+               pci_restore_state(sp->pdev);
+-              pci_save_state(sp->pdev);
+               pci_read_config_word(sp->pdev, 0x2, &val16);
+               if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
+                       break;
+--- a/drivers/pci/pcie/portdrv.c
++++ b/drivers/pci/pcie/portdrv.c
+@@ -760,7 +760,6 @@ static pci_ers_result_t pcie_portdrv_slo
+       device_for_each_child(&dev->dev, &off, pcie_port_device_iter);
+       pci_restore_state(dev);
+-      pci_save_state(dev);
+       return PCI_ERS_RESULT_RECOVERED;
+ }
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -1528,7 +1528,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev
+               goto out_disable_device;
+       }
+-      pci_save_state(pdev);
+       pci_set_master(pdev);
+       rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+--- a/drivers/scsi/csiostor/csio_init.c
++++ b/drivers/scsi/csiostor/csio_init.c
+@@ -1093,7 +1093,6 @@ csio_pci_slot_reset(struct pci_dev *pdev
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       /* Bring HW s/m to ready state.
+        * but don't resume IOs.
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -7883,7 +7883,6 @@ static int ipr_reset_restore_cfg_space(s
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       ENTER;
+-      ioa_cfg->pdev->state_saved = true;
+       pci_restore_state(ioa_cfg->pdev);
+       if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -14434,12 +14434,6 @@ lpfc_io_slot_reset_s3(struct pci_dev *pd
+       pci_restore_state(pdev);
+-      /*
+-       * As the new kernel behavior of pci_restore_state() API call clears
+-       * device saved_state flag, need to save the restored state again.
+-       */
+-      pci_save_state(pdev);
+-
+       if (pdev->is_busmaster)
+               pci_set_master(pdev);
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -7890,11 +7890,6 @@ qla2xxx_pci_slot_reset(struct pci_dev *p
+       pci_restore_state(pdev);
+-      /* pci_restore_state() clears the saved_state flag of the device
+-       * save restored state which resets saved_state flag
+-       */
+-      pci_save_state(pdev);
+-
+       if (ha->mem_only)
+               rc = pci_enable_device_mem(pdev);
+       else
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -9796,11 +9796,6 @@ qla4xxx_pci_slot_reset(struct pci_dev *p
+        */
+       pci_restore_state(pdev);
+-      /* pci_restore_state() clears the saved_state flag of the device
+-       * save restored state which resets saved_state flag
+-       */
+-      pci_save_state(pdev);
+-
+       /* Initialize device or resume if in suspended state */
+       rc = pci_enable_device(pdev);
+       if (rc) {
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -6215,7 +6215,6 @@ static pci_ers_result_t serial8250_io_sl
+               return PCI_ERS_RESULT_DISCONNECT;
+       pci_restore_state(dev);
+-      pci_save_state(dev);
+       return PCI_ERS_RESULT_RECOVERED;
+ }
+--- a/drivers/tty/serial/jsm/jsm_driver.c
++++ b/drivers/tty/serial/jsm/jsm_driver.c
+@@ -355,7 +355,6 @@ static void jsm_io_resume(struct pci_dev
+       struct jsm_board *brd = pci_get_drvdata(pdev);
+       pci_restore_state(pdev);
+-      pci_save_state(pdev);
+       jsm_uart_port_init(brd);
+ }