git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.19
author    Sasha Levin <sashal@kernel.org>
          Mon, 12 Sep 2022 10:56:15 +0000 (06:56 -0400)
committer Sasha Levin <sashal@kernel.org>
          Mon, 12 Sep 2022 10:56:15 +0000 (06:56 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
20 files changed:
queue-5.19/bonding-accept-unsolicited-na-message.patch [new file with mode: 0644]
queue-5.19/hwmon-mr75203-enable-polling-for-all-vm-channels.patch [new file with mode: 0644]
queue-5.19/hwmon-mr75203-fix-multi-channel-voltage-reading.patch [new file with mode: 0644]
queue-5.19/hwmon-mr75203-fix-vm-sensor-allocation-when-intel-vm.patch [new file with mode: 0644]
queue-5.19/hwmon-mr75203-fix-voltage-equation-for-negative-sour.patch [new file with mode: 0644]
queue-5.19/hwmon-mr75203-update-pvt-v_num-and-vm_num-to-the-act.patch [new file with mode: 0644]
queue-5.19/i40e-fix-adq-rate-limiting-for-pf.patch [new file with mode: 0644]
queue-5.19/i40e-refactor-tc-mqprio-checks.patch [new file with mode: 0644]
queue-5.19/iommu-amd-use-full-64-bit-value-in-build_completion_.patch [new file with mode: 0644]
queue-5.19/iommu-vt-d-fix-possible-recursive-locking-in-intel_i.patch [new file with mode: 0644]
queue-5.19/kbuild-disable-header-exports-for-uml-in-a-straightf.patch [new file with mode: 0644]
queue-5.19/mips-loongson32-ls1c-fix-hang-during-startup.patch [new file with mode: 0644]
queue-5.19/net-bonding-replace-dev_trans_start-with-the-jiffies.patch [new file with mode: 0644]
queue-5.19/net-dsa-felix-tc-taprio-intervals-smaller-than-mtu-s.patch [new file with mode: 0644]
queue-5.19/perf-evlist-always-use-arch_evlist__add_default_attr.patch [new file with mode: 0644]
queue-5.19/perf-stat-fix-l2-topdown-metrics-disappear-for-raw-e.patch [new file with mode: 0644]
queue-5.19/s390-boot-fix-absolute-zero-lowcore-corruption-on-bo.patch [new file with mode: 0644]
queue-5.19/series
queue-5.19/swiotlb-avoid-potential-left-shift-overflow.patch [new file with mode: 0644]
queue-5.19/time64.h-consolidate-uses-of-psec_per_nsec.patch [new file with mode: 0644]

diff --git a/queue-5.19/bonding-accept-unsolicited-na-message.patch b/queue-5.19/bonding-accept-unsolicited-na-message.patch
new file mode 100644 (file)
index 0000000..08a80c3
--- /dev/null
@@ -0,0 +1,70 @@
+From ef77addad77dc6ba38c224f7787da78cfe6bcd20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Aug 2022 17:37:22 +0800
+Subject: bonding: accept unsolicited NA message
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 592335a4164c3c41f57967223a1e1efe3a0c6eb3 ]
+
+The unsolicited NA message with all-nodes multicast dest address should
+be valid, as this also means the link could reach the target.
+
+Also rename bond_validate_ns() to bond_validate_na().
+
+Reported-by: LiLiang <liali@redhat.com>
+Fixes: 5e1eeef69c0f ("bonding: NS target should accept link local address")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 01b58b7e7f165..bff0bfd10e235 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3174,12 +3174,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+       return ret;
+ }
+-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
++static void bond_validate_na(struct bonding *bond, struct slave *slave,
+                            struct in6_addr *saddr, struct in6_addr *daddr)
+ {
+       int i;
+-      if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
++      /* Ignore NAs that:
++       * 1. Source address is unspecified address.
++       * 2. Dest address is neither all-nodes multicast address nor
++       *    exist on bond interface.
++       */
++      if (ipv6_addr_any(saddr) ||
++          (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
++           !bond_has_this_ip6(bond, daddr))) {
+               slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+                         __func__, saddr, daddr);
+               return;
+@@ -3222,14 +3229,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+        * see bond_arp_rcv().
+        */
+       if (bond_is_active_slave(slave))
+-              bond_validate_ns(bond, slave, saddr, daddr);
++              bond_validate_na(bond, slave, saddr, daddr);
+       else if (curr_active_slave &&
+                time_after(slave_last_rx(bond, curr_active_slave),
+                           curr_active_slave->last_link_up))
+-              bond_validate_ns(bond, slave, saddr, daddr);
++              bond_validate_na(bond, slave, saddr, daddr);
+       else if (curr_arp_slave &&
+                bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+-              bond_validate_ns(bond, slave, saddr, daddr);
++              bond_validate_na(bond, slave, saddr, daddr);
+ out:
+       return RX_HANDLER_ANOTHER;
+-- 
+2.35.1
+
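
The hunk above is the whole behavioural change: an NA whose destination is the all-nodes multicast address (ff02::1) is now accepted even though it is not one of the bond's own addresses. Below is a minimal userspace sketch of that acceptance rule, using standard socket headers rather than the kernel's IPv6 helpers; bond_has_addr() over a fixed address list is a hypothetical stand-in for bond_has_this_ip6(), and none of this is the driver code itself.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for bond_has_this_ip6(): linear scan of known addrs. */
static bool bond_has_addr(const struct in6_addr *addr,
			  const struct in6_addr *bond_addrs, int n)
{
	for (int i = 0; i < n; i++)
		if (!memcmp(addr, &bond_addrs[i], sizeof(*addr)))
			return true;
	return false;
}

/* Mirrors the condition added to bond_validate_na(): ignore the NA only if
 * the source is unspecified, or the destination is neither the all-nodes
 * multicast address nor one of our own addresses.
 */
static bool na_ignored(const struct in6_addr *saddr, const struct in6_addr *daddr,
		       const struct in6_addr *bond_addrs, int n)
{
	struct in6_addr allnodes;

	inet_pton(AF_INET6, "ff02::1", &allnodes);

	return IN6_IS_ADDR_UNSPECIFIED(saddr) ||
	       (memcmp(daddr, &allnodes, sizeof(allnodes)) &&
		!bond_has_addr(daddr, bond_addrs, n));
}

int main(void)
{
	struct in6_addr src, dst, own;

	inet_pton(AF_INET6, "fe80::1", &src);
	inet_pton(AF_INET6, "ff02::1", &dst);	/* unsolicited NA destination */
	inet_pton(AF_INET6, "fe80::2", &own);	/* the bond's own address */

	printf("unsolicited NA ignored: %d\n", na_ignored(&src, &dst, &own, 1));
	return 0;
}

With the old check the ff02::1 destination would have been rejected; here the program prints 0, i.e. the unsolicited NA counts as proof that the link can reach the target.
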
diff --git a/queue-5.19/hwmon-mr75203-enable-polling-for-all-vm-channels.patch b/queue-5.19/hwmon-mr75203-enable-polling-for-all-vm-channels.patch
new file mode 100644 (file)
index 0000000..bef9c39
--- /dev/null
@@ -0,0 +1,51 @@
+From b2fd0364cd39fa0ac78672b6d7566551db759e64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:24:34 +0000
+Subject: hwmon: (mr75203) enable polling for all VM channels
+
+From: Eliav Farber <farbere@amazon.com>
+
+[ Upstream commit e43212e0f55dc2d6b15d6c174cc0a64b25fab5e7 ]
+
+Configure ip-polling register to enable polling for all voltage monitor
+channels.
+This enables reading the voltage values for all inputs other than just
+input 0.
+
+Fixes: 9d823351a337 ("hwmon: Add hardware monitoring driver for Moortec MR75203 PVT controller")
+Signed-off-by: Eliav Farber <farbere@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220908152449.35457-7-farbere@amazon.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mr75203.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 812ae40e7d74d..9259779cc2dff 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -388,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt)
+               if (ret)
+                       return ret;
++              val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
++                    IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
++              ret = regmap_write(v_map, SDIF_W, val);
++              if (ret < 0)
++                      return ret;
++
++              ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
++                                             val, !(val & SDIF_BUSY),
++                                             PVT_POLL_DELAY_US,
++                                             PVT_POLL_TIMEOUT_US);
++              if (ret)
++                      return ret;
++
+               val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
+                     CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
+                     SDIF_WRN_W | SDIF_PROG;
+-- 
+2.35.1
+
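
The key line in the hunk is the SDIF_W value: a per-channel enable mask built with BIT(c_num) - 1, combined with the IP_POLL command bits. The sketch below only illustrates that bit construction; every macro value in it is a placeholder, not the mr75203 register layout.

#include <stdint.h>
#include <stdio.h>

#define VM_CH_INIT	(1u << 20)	/* placeholder */
#define IP_POLL		0x04u		/* placeholder */
#define SDIF_ADDR_SFT	24		/* placeholder */
#define SDIF_WRN_W	(1u << 31)	/* placeholder */
#define SDIF_PROG	(1u << 30)	/* placeholder */

/* One enable bit per VM channel, plus the polling command bits. */
static uint32_t vm_poll_word(unsigned int c_num)
{
	uint32_t ch_mask = (1u << c_num) - 1;

	return ch_mask | VM_CH_INIT | (IP_POLL << SDIF_ADDR_SFT) |
	       SDIF_WRN_W | SDIF_PROG;
}

int main(void)
{
	/* four channels -> low nibble 0xf, so inputs 1..3 get polled too */
	printf("0x%08x\n", vm_poll_word(4));
	return 0;
}
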
diff --git a/queue-5.19/hwmon-mr75203-fix-multi-channel-voltage-reading.patch b/queue-5.19/hwmon-mr75203-fix-multi-channel-voltage-reading.patch
new file mode 100644 (file)
index 0000000..a244ad5
--- /dev/null
@@ -0,0 +1,136 @@
+From 1d7928ccdda95ae950de0303c18913356665675c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:24:33 +0000
+Subject: hwmon: (mr75203) fix multi-channel voltage reading
+
+From: Eliav Farber <farbere@amazon.com>
+
+[ Upstream commit 91a9e063cdcfca8fe642b078d6fae4ce49187975 ]
+
+Fix voltage allocation and reading to support all channels in all VMs.
+Prior to this change allocation and reading were done only for the first
+channel in each VM.
+This change counts the total number of channels for allocation, and takes
+into account the channel offset when reading the sample data register.
+
+Fixes: 9d823351a337 ("hwmon: Add hardware monitoring driver for Moortec MR75203 PVT controller")
+Signed-off-by: Eliav Farber <farbere@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220908152449.35457-6-farbere@amazon.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mr75203.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 6d3b3c499ed83..812ae40e7d74d 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -68,8 +68,9 @@
+ /* VM Individual Macro Register */
+ #define VM_COM_REG_SIZE       0x200
+-#define VM_SDIF_DONE(n)       (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
+-#define VM_SDIF_DATA(n)       (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
++#define VM_SDIF_DONE(vm)      (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
++#define VM_SDIF_DATA(vm, ch)  \
++      (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
+ /* SDA Slave Register */
+ #define IP_CTRL                       0x00
+@@ -115,6 +116,7 @@ struct pvt_device {
+       u32                     t_num;
+       u32                     p_num;
+       u32                     v_num;
++      u32                     c_num;
+       u32                     ip_freq;
+       u8                      *vm_idx;
+ };
+@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+ {
+       struct pvt_device *pvt = dev_get_drvdata(dev);
+       struct regmap *v_map = pvt->v_map;
++      u8 vm_idx, ch_idx;
+       u32 n, stat;
+-      u8 vm_idx;
+       int ret;
+-      if (channel >= pvt->v_num)
++      if (channel >= pvt->v_num * pvt->c_num)
+               return -EINVAL;
+-      vm_idx = pvt->vm_idx[channel];
++      vm_idx = pvt->vm_idx[channel / pvt->c_num];
++      ch_idx = channel % pvt->c_num;
+       switch (attr) {
+       case hwmon_in_input:
+@@ -196,7 +199,7 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+               if (ret)
+                       return ret;
+-              ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
++              ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
+               if(ret < 0)
+                       return ret;
+@@ -499,8 +502,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
+ static int mr75203_probe(struct platform_device *pdev)
+ {
++      u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
+       const struct hwmon_channel_info **pvt_info;
+-      u32 ts_num, vm_num, pd_num, val, index, i;
+       struct device *dev = &pdev->dev;
+       u32 *temp_config, *in_config;
+       struct device *hwmon_dev;
+@@ -541,9 +544,11 @@ static int mr75203_probe(struct platform_device *pdev)
+       ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
+       pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
+       vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
++      ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
+       pvt->t_num = ts_num;
+       pvt->p_num = pd_num;
+       pvt->v_num = vm_num;
++      pvt->c_num = ch_num;
+       val = 0;
+       if (ts_num)
+               val++;
+@@ -580,7 +585,7 @@ static int mr75203_probe(struct platform_device *pdev)
+       }
+       if (vm_num) {
+-              u32 num = vm_num;
++              u32 total_ch;
+               ret = pvt_get_regmap(pdev, "vm", pvt);
+               if (ret)
+@@ -604,20 +609,20 @@ static int mr75203_probe(struct platform_device *pdev)
+                       for (i = 0; i < vm_num; i++)
+                               if (pvt->vm_idx[i] >= vm_num ||
+                                   pvt->vm_idx[i] == 0xff) {
+-                                      num = i;
+                                       pvt->v_num = i;
+                                       vm_num = i;
+                                       break;
+                               }
+               }
+-              in_config = devm_kcalloc(dev, num + 1,
++              total_ch = ch_num * vm_num;
++              in_config = devm_kcalloc(dev, total_ch + 1,
+                                        sizeof(*in_config), GFP_KERNEL);
+               if (!in_config)
+                       return -ENOMEM;
+-              memset32(in_config, HWMON_I_INPUT, num);
+-              in_config[num] = 0;
++              memset32(in_config, HWMON_I_INPUT, total_ch);
++              in_config[total_ch] = 0;
+               pvt_in.config = in_config;
+               pvt_info[index++] = &pvt_in;
+-- 
+2.35.1
+
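
The reworked VM_SDIF_DATA(vm, ch) macro gives each channel its own sample-data register, and pvt_read_in() now splits the flat hwmon channel number into a (VM, channel) pair. A userspace sketch of that decoding follows; it ignores the intel,vm-map remap table and assumes four channels per VM purely as an example value.

#include <stdio.h>

#define VM_COM_REG_SIZE	0x200
/* Same layout as the macro in the patch: 0x200 per VM, 0x4 per channel. */
#define VM_SDIF_DATA(vm, ch) \
	(VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))

int main(void)
{
	unsigned int c_num = 4;	/* channels per VM, example value */

	for (unsigned int channel = 0; channel < 8; channel++) {
		unsigned int vm_idx = channel / c_num;
		unsigned int ch_idx = channel % c_num;

		printf("in%u -> vm %u ch %u, data reg 0x%03x\n",
		       channel, vm_idx, ch_idx, VM_SDIF_DATA(vm_idx, ch_idx));
	}
	return 0;
}

Before the fix the macro had no channel term, so only the first channel of each VM was allocated and read, which is exactly the limitation the commit message describes.
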
diff --git a/queue-5.19/hwmon-mr75203-fix-vm-sensor-allocation-when-intel-vm.patch b/queue-5.19/hwmon-mr75203-fix-vm-sensor-allocation-when-intel-vm.patch
new file mode 100644 (file)
index 0000000..e44756b
--- /dev/null
@@ -0,0 +1,71 @@
+From dc24904e16925830912e7022805a06cc664e15c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:24:30 +0000
+Subject: hwmon: (mr75203) fix VM sensor allocation when "intel,vm-map" not
+ defined
+
+From: Eliav Farber <farbere@amazon.com>
+
+[ Upstream commit 81114fc3d27bf5b06b2137d2fd2b63da656a8b90 ]
+
+Bug - in case "intel,vm-map" is missing in device-tree ,'num' is set
+to 0, and no voltage channel infos are allocated.
+
+The reason num is set to 0 when "intel,vm-map" is missing is to set the
+entire pvt->vm_idx[] with incremental channel numbers, but it didn't
+take into consideration that same num is used later in devm_kcalloc().
+
+If "intel,vm-map" does exist there is no need to set the unspecified
+channels with incremental numbers, because the unspecified channels
+can't be accessed in pvt_read_in() which is the only other place besides
+the probe functions that uses pvt->vm_idx[].
+
+This change fixes the bug by moving the incremental channel numbers
+setting to be done only if "intel,vm-map" property is defined (starting
+loop from 0), and removing 'num = 0'.
+
+Fixes: 9d823351a337 ("hwmon: Add hardware monitoring driver for Moortec MR75203 PVT controller")
+Signed-off-by: Eliav Farber <farbere@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220908152449.35457-3-farbere@amazon.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mr75203.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 26278b0f17a98..8b72e8fe34c1b 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -584,7 +584,12 @@ static int mr75203_probe(struct platform_device *pdev)
+               ret = device_property_read_u8_array(dev, "intel,vm-map",
+                                                   pvt->vm_idx, vm_num);
+               if (ret) {
+-                      num = 0;
++                      /*
++                       * Incase intel,vm-map property is not defined, we
++                       * assume incremental channel numbers.
++                       */
++                      for (i = 0; i < vm_num; i++)
++                              pvt->vm_idx[i] = i;
+               } else {
+                       for (i = 0; i < vm_num; i++)
+                               if (pvt->vm_idx[i] >= vm_num ||
+@@ -594,13 +599,6 @@ static int mr75203_probe(struct platform_device *pdev)
+                               }
+               }
+-              /*
+-               * Incase intel,vm-map property is not defined, we assume
+-               * incremental channel numbers.
+-               */
+-              for (i = num; i < vm_num; i++)
+-                      pvt->vm_idx[i] = i;
+-
+               in_config = devm_kcalloc(dev, num + 1,
+                                        sizeof(*in_config), GFP_KERNEL);
+               if (!in_config)
+-- 
+2.35.1
+
diff --git a/queue-5.19/hwmon-mr75203-fix-voltage-equation-for-negative-sour.patch b/queue-5.19/hwmon-mr75203-fix-voltage-equation-for-negative-sour.patch
new file mode 100644 (file)
index 0000000..6f5ad24
--- /dev/null
@@ -0,0 +1,70 @@
+From ccb7b24d884163ff199a3e2e6e943b0143bf2176 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:24:32 +0000
+Subject: hwmon: (mr75203) fix voltage equation for negative source input
+
+From: Eliav Farber <farbere@amazon.com>
+
+[ Upstream commit 227a3a2fc31d8e4bb9c88d4804e19530af245b1b ]
+
+According to Moortec Embedded Voltage Monitor (MEVM) series 3 data
+sheet, the minimum input signal is -100mv and maximum input signal
+is +1000mv.
+
+The equation used to convert the digital word to voltage uses mixed
+types (*val signed and n unsigned), and on 64 bit machines also has
+different size, since sizeof(u32) = 4 and sizeof(long) = 8.
+
+So when measuring a negative input, n will be small enough, such that
+PVT_N_CONST * n < PVT_R_CONST, and the result of
+(PVT_N_CONST * n - PVT_R_CONST) will overflow to a very big positive
+32 bit number. Then when storing the result in *val it will be the same
+value just in 64 bit (instead of it representing a negative number which
+will what happen when sizeof(long) = 4).
+
+When -1023 <= (PVT_N_CONST * n - PVT_R_CONST) <= -1
+dividing the number by 1024 should result of in 0, but because ">> 10"
+is used, and the sign bit is used to fill the vacated bit positions, it
+results in -1 (0xf...fffff) which is wrong.
+
+This change fixes the sign problem and supports negative values by
+casting n to long and replacing the shift right with div operation.
+
+Fixes: 9d823351a337 ("hwmon: Add hardware monitoring driver for Moortec MR75203 PVT controller")
+Signed-off-by: Eliav Farber <farbere@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220908152449.35457-5-farbere@amazon.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mr75203.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index be02f32bf143d..6d3b3c499ed83 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -201,8 +201,18 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+                       return ret;
+               n &= SAMPLE_DATA_MSK;
+-              /* Convert the N bitstream count into voltage */
+-              *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
++              /*
++               * Convert the N bitstream count into voltage.
++               * To support negative voltage calculation for 64bit machines
++               * n must be cast to long, since n and *val differ both in
++               * signedness and in size.
++               * Division is used instead of right shift, because for signed
++               * numbers, the sign bit is used to fill the vacated bit
++               * positions, and if the number is negative, 1 is used.
++               * BIT(x) may not be used instead of (1 << x) because it's
++               * unsigned.
++               */
++              *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
+               return 0;
+       default:
+-- 
+2.35.1
+
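
The conversion problem is easiest to see with concrete numbers. The sketch below uses made-up PVT_* constants (the driver's real calibration values are not reproduced here) and shows both effects the message describes: the unsigned wrap-around when the raw sample word is small, and the arithmetic right shift rounding -1000 down to -1 where the division correctly yields 0.

#include <stdio.h>

#define PVT_N_CONST	1000	/* placeholder */
#define PVT_R_CONST	500000	/* placeholder */
#define PVT_CONV_BITS	10

int main(void)
{
	unsigned int n = 499;	/* small sample word: a negative input voltage */

	/* Mixed u32/long arithmetic: the subtraction wraps to a huge positive
	 * 32-bit value before it is widened into the signed result. */
	long buggy = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;

	/* Even with a correct signed intermediate, ">> 10" on -1000 gives -1,
	 * while dividing by 1 << 10 rounds toward zero as intended. */
	long shifted = (PVT_N_CONST * (long)n - PVT_R_CONST) >> PVT_CONV_BITS;
	long fixed = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);

	printf("buggy=%ld shifted=%ld fixed=%ld\n", buggy, shifted, fixed);
	return 0;
}

On a 64-bit build this prints a large positive number, -1 and 0 respectively, matching the behaviour the patch corrects by casting n to long and replacing the shift with a division.
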
diff --git a/queue-5.19/hwmon-mr75203-update-pvt-v_num-and-vm_num-to-the-act.patch b/queue-5.19/hwmon-mr75203-update-pvt-v_num-and-vm_num-to-the-act.patch
new file mode 100644 (file)
index 0000000..d38b590
--- /dev/null
@@ -0,0 +1,42 @@
+From 12712bfccc6ca6a93ea92eea2c4d1abe6dd1477d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:24:31 +0000
+Subject: hwmon: (mr75203) update pvt->v_num and vm_num to the actual number of
+ used sensors
+
+From: Eliav Farber <farbere@amazon.com>
+
+[ Upstream commit bb9195bd6664d94d71647631593e09f705ff5edd ]
+
+This issue is relevant when "intel,vm-map" is set in device-tree, and
+defines a lower number of VMs than actually supported.
+
+This change is needed for all places that use pvt->v_num or vm_num
+later on in the code.
+
+Fixes: 9d823351a337 ("hwmon: Add hardware monitoring driver for Moortec MR75203 PVT controller")
+Signed-off-by: Eliav Farber <farbere@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220908152449.35457-4-farbere@amazon.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mr75203.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 8b72e8fe34c1b..be02f32bf143d 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -595,6 +595,8 @@ static int mr75203_probe(struct platform_device *pdev)
+                               if (pvt->vm_idx[i] >= vm_num ||
+                                   pvt->vm_idx[i] == 0xff) {
+                                       num = i;
++                                      pvt->v_num = i;
++                                      vm_num = i;
+                                       break;
+                               }
+               }
+-- 
+2.35.1
+
diff --git a/queue-5.19/i40e-fix-adq-rate-limiting-for-pf.patch b/queue-5.19/i40e-fix-adq-rate-limiting-for-pf.patch
new file mode 100644 (file)
index 0000000..b805586
--- /dev/null
@@ -0,0 +1,58 @@
+From 2b3fd7e0d15c63ab3b58012d27d6b176370aeefb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Aug 2022 10:57:44 +0200
+Subject: i40e: Fix ADQ rate limiting for PF
+
+From: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+
+[ Upstream commit 45bb006d3c924b1201ed43c87a96b437662dcaa8 ]
+
+Fix HW rate limiting for ADQ.
+Fallback to kernel queue selection for ADQ, as it is network stack
+that decides which queue to use for transmit with ADQ configured.
+Reset PF after creation of VMDq2 VSIs required for ADQ, as to
+reprogram TX queue contexts in i40e_configure_tx_ring.
+Without this patch PF would limit TX rate only according to TC0.
+
+Fixes: a9ce82f744dc ("i40e: Enable 'channel' mode in mqprio for TC configs")
+Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+Signed-off-by: Jan Sokolowski <jan.sokolowski@intel.com>
+Tested-by: Bharathi Sreenivas <bharathi.sreenivas@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +++
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 45c56832c14fd..1aaf0c5ddf6cf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6536,6 +6536,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
+                       vsi->tc_seid_map[i] = ch->seid;
+               }
+       }
++
++      /* reset to reconfigure TX queue contexts */
++      i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
+       return ret;
+ err_free:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index af69ccc6e8d2f..07f1e209d524d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -3689,7 +3689,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
+       u8 prio;
+       /* is DCB enabled at all? */
+-      if (vsi->tc_config.numtc == 1)
++      if (vsi->tc_config.numtc == 1 ||
++          i40e_is_tc_mqprio_enabled(vsi->back))
+               return netdev_pick_tx(netdev, skb, sb_dev);
+       prio = skb->priority;
+-- 
+2.35.1
+
diff --git a/queue-5.19/i40e-refactor-tc-mqprio-checks.patch b/queue-5.19/i40e-refactor-tc-mqprio-checks.patch
new file mode 100644 (file)
index 0000000..5357a89
--- /dev/null
@@ -0,0 +1,157 @@
+From 32cb2c5c607765341d8a27a1e0d86c5990b12f94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 13:52:48 +0200
+Subject: i40e: Refactor tc mqprio checks
+
+From: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+
+[ Upstream commit 2313e69c84c024a85d017a60ae925085de47530a ]
+
+Refactor bitwise checks for whether TC MQPRIO is enabled
+into one single method for improved readability.
+
+Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+Signed-off-by: Jan Sokolowski <jan.sokolowski@intel.com>
+Tested-by: Bharathi Sreenivas <bharathi.sreenivas@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 45bb006d3c92 ("i40e: Fix ADQ rate limiting for PF")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e.h        | 14 +++++++++++++
+ .../net/ethernet/intel/i40e/i40e_ethtool.c    |  2 +-
+ drivers/net/ethernet/intel/i40e/i40e_main.c   | 20 +++++++++----------
+ 3 files changed, 25 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 407fe8f340a06..c5b61bc80f783 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -1291,4 +1291,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+                                     struct i40e_cloud_filter *filter,
+                                     bool add);
++
++/**
++ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
++ * @pf: pointer to a pf.
++ *
++ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
++ *
++ * Return: I40E_FLAG_TC_MQPRIO set state.
++ **/
++static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
++{
++      return pf->flags & I40E_FLAG_TC_MQPRIO;
++}
++
+ #endif /* _I40E_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 22a61802a4027..ed9984f1e1b9f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4931,7 +4931,7 @@ static int i40e_set_channels(struct net_device *dev,
+       /* We do not support setting channels via ethtool when TCs are
+        * configured through mqprio
+        */
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++      if (i40e_is_tc_mqprio_enabled(pf))
+               return -EINVAL;
+       /* verify they are not requesting separate vectors */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 71a8e1698ed48..45c56832c14fd 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5339,7 +5339,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+       u8 num_tc = 0;
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++      if (i40e_is_tc_mqprio_enabled(pf))
+               return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+       /* If neither MQPRIO nor DCB is enabled, then always use single TC */
+@@ -5371,7 +5371,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+  **/
+ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+ {
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++      if (i40e_is_tc_mqprio_enabled(pf))
+               return i40e_mqprio_get_enabled_tc(pf);
+       /* If neither MQPRIO nor DCB is enabled for this PF then just return
+@@ -5468,7 +5468,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+       int i;
+       /* There is no need to reset BW when mqprio mode is on.  */
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++      if (i40e_is_tc_mqprio_enabled(pf))
+               return 0;
+       if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+               ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+@@ -5540,7 +5540,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+                                       vsi->tc_config.tc_info[i].qoffset);
+       }
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++      if (i40e_is_tc_mqprio_enabled(pf))
+               return;
+       /* Assign UP2TC map for the VSI */
+@@ -5701,7 +5701,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+       ctxt.vf_num = 0;
+       ctxt.uplink_seid = vsi->uplink_seid;
+       ctxt.info = vsi->info;
+-      if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
++      if (i40e_is_tc_mqprio_enabled(pf)) {
+               ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
+               if (ret)
+                       goto out;
+@@ -6425,7 +6425,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
+               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+               if (vsi->type == I40E_VSI_MAIN) {
+-                      if (pf->flags & I40E_FLAG_TC_MQPRIO)
++                      if (i40e_is_tc_mqprio_enabled(pf))
+                               i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+                       else
+                               i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+@@ -7819,7 +7819,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
+               netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
+               return ERR_PTR(-EINVAL);
+       }
+-      if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
++      if (i40e_is_tc_mqprio_enabled(pf)) {
+               netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
+               return ERR_PTR(-EINVAL);
+       }
+@@ -8072,7 +8072,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+       /* Quiesce VSI queues */
+       i40e_quiesce_vsi(vsi);
+-      if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
++      if (!hw && !i40e_is_tc_mqprio_enabled(pf))
+               i40e_remove_queue_channels(vsi);
+       /* Configure VSI for enabled TCs */
+@@ -8096,7 +8096,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+                "Setup channel (id:%u) utilizing num_queues %d\n",
+                vsi->seid, vsi->tc_config.tc_info[0].qcount);
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO) {
++      if (i40e_is_tc_mqprio_enabled(pf)) {
+               if (vsi->mqprio_qopt.max_rate[0]) {
+                       u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+@@ -10750,7 +10750,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+        * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
+        * is not supported with new link speed
+        */
+-      if (pf->flags & I40E_FLAG_TC_MQPRIO) {
++      if (i40e_is_tc_mqprio_enabled(pf)) {
+               i40e_aq_set_dcb_parameters(hw, false, NULL);
+       } else {
+               if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+-- 
+2.35.1
+
diff --git a/queue-5.19/iommu-amd-use-full-64-bit-value-in-build_completion_.patch b/queue-5.19/iommu-amd-use-full-64-bit-value-in-build_completion_.patch
new file mode 100644 (file)
index 0000000..97e1543
--- /dev/null
@@ -0,0 +1,39 @@
+From 5802e79b8ae59ce89a0b277453086a846c1c0ca2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Aug 2022 19:22:29 +0000
+Subject: iommu/amd: use full 64-bit value in build_completion_wait()
+
+From: John Sperbeck <jsperbeck@google.com>
+
+[ Upstream commit 94a568ce32038d8ff9257004bb4632e60eb43a49 ]
+
+We started using a 64 bit completion value.  Unfortunately, we only
+stored the low 32-bits, so a very large completion value would never
+be matched in iommu_completion_wait().
+
+Fixes: c69d89aff393 ("iommu/amd: Use 4K page for completion wait write-back semaphore")
+Signed-off-by: John Sperbeck <jsperbeck@google.com>
+Link: https://lore.kernel.org/r/20220801192229.3358786-1-jsperbeck@google.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/iommu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 840831d5d2ad9..a0924144bac80 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -874,7 +874,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+       cmd->data[1] = upper_32_bits(paddr);
+-      cmd->data[2] = data;
++      cmd->data[2] = lower_32_bits(data);
++      cmd->data[3] = upper_32_bits(data);
+       CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+ }
+-- 
+2.35.1
+
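
A short userspace sketch of the truncation being fixed: lower_32_bits()/upper_32_bits() below carry the usual kernel semantics, and the plain array is only a stand-in for the four command data words, not the real struct iommu_cmd layout.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel helpers of the same name. */
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t data = 0x100000002ULL;	/* completion value that needs >32 bits */
	uint32_t cmd_data[4] = { 0 };	/* stand-in for the command data words */
	uint64_t seen_old, seen_new;

	/* old behaviour: only the low half survives */
	cmd_data[2] = (uint32_t)data;
	seen_old = cmd_data[2];

	/* fixed behaviour: both halves are written */
	cmd_data[2] = lower_32_bits(data);
	cmd_data[3] = upper_32_bits(data);
	seen_new = ((uint64_t)cmd_data[3] << 32) | cmd_data[2];

	printf("wanted 0x%" PRIx64 ", old 0x%" PRIx64 ", fixed 0x%" PRIx64 "\n",
	       data, seen_old, seen_new);
	return 0;
}

With only data[2] written, a completion value above 32 bits can never match the write-back semaphore, which is the wait-forever case the commit message describes.
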
diff --git a/queue-5.19/iommu-vt-d-fix-possible-recursive-locking-in-intel_i.patch b/queue-5.19/iommu-vt-d-fix-possible-recursive-locking-in-intel_i.patch
new file mode 100644 (file)
index 0000000..4606af9
--- /dev/null
@@ -0,0 +1,170 @@
+From b7f5eb35b11dc357d3df9d86e832e06eb57d645d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Sep 2022 11:18:45 +0800
+Subject: iommu/vt-d: Fix possible recursive locking in intel_iommu_init()
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit 9cd4f1434479f1ac25c440c421fbf52069079914 ]
+
+The global rwsem dmar_global_lock was introduced by commit 3a5670e8ac932
+("iommu/vt-d: Introduce a rwsem to protect global data structures"). It
+is used to protect DMAR related global data from DMAR hotplug operations.
+
+The dmar_global_lock used in the intel_iommu_init() might cause recursive
+locking issue, for example, intel_iommu_get_resv_regions() is taking the
+dmar_global_lock from within a section where intel_iommu_init() already
+holds it via probe_acpi_namespace_devices().
+
+Using dmar_global_lock in intel_iommu_init() could be relaxed since it is
+unlikely that any IO board must be hot added before the IOMMU subsystem is
+initialized. This eliminates the possible recursive locking issue by moving
+down DMAR hotplug support after the IOMMU is initialized and removing the
+uses of dmar_global_lock in intel_iommu_init().
+
+Fixes: d5692d4af08cd ("iommu/vt-d: Fix suspicious RCU usage in probe_acpi_namespace_devices()")
+Reported-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/894db0ccae854b35c73814485569b634237b5538.1657034828.git.robin.murphy@arm.com
+Link: https://lore.kernel.org/r/20220718235325.3952426-1-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/dmar.c  |  7 +++++++
+ drivers/iommu/intel/iommu.c | 27 ++-------------------------
+ include/linux/dmar.h        |  4 +++-
+ 3 files changed, 12 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 64b14ac4c7b02..fc8c1420c0b69 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -2368,6 +2368,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
+       if (!dmar_in_use())
+               return 0;
++      /*
++       * It's unlikely that any I/O board is hot added before the IOMMU
++       * subsystem is initialized.
++       */
++      if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
++              return -EOPNOTSUPP;
++
+       if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+               tmp = handle;
+       } else {
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5c0dce78586aa..936def2f416a2 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3123,13 +3123,7 @@ static int __init init_dmars(void)
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+               if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+-                      /*
+-                       * Call dmar_alloc_hwirq() with dmar_global_lock held,
+-                       * could cause possible lock race condition.
+-                       */
+-                      up_write(&dmar_global_lock);
+                       ret = intel_svm_enable_prq(iommu);
+-                      down_write(&dmar_global_lock);
+                       if (ret)
+                               goto free_iommu;
+               }
+@@ -4035,7 +4029,6 @@ int __init intel_iommu_init(void)
+       force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
+                   platform_optin_force_iommu();
+-      down_write(&dmar_global_lock);
+       if (dmar_table_init()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize DMAR table\n");
+@@ -4048,16 +4041,6 @@ int __init intel_iommu_init(void)
+               goto out_free_dmar;
+       }
+-      up_write(&dmar_global_lock);
+-
+-      /*
+-       * The bus notifier takes the dmar_global_lock, so lockdep will
+-       * complain later when we register it under the lock.
+-       */
+-      dmar_register_bus_notifier();
+-
+-      down_write(&dmar_global_lock);
+-
+       if (!no_iommu)
+               intel_iommu_debugfs_init();
+@@ -4105,11 +4088,9 @@ int __init intel_iommu_init(void)
+               pr_err("Initialization failed\n");
+               goto out_free_dmar;
+       }
+-      up_write(&dmar_global_lock);
+       init_iommu_pm_ops();
+-      down_read(&dmar_global_lock);
+       for_each_active_iommu(iommu, drhd) {
+               /*
+                * The flush queue implementation does not perform
+@@ -4127,13 +4108,11 @@ int __init intel_iommu_init(void)
+                                      "%s", iommu->name);
+               iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+       }
+-      up_read(&dmar_global_lock);
+       bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
+       if (si_domain && !hw_pass_through)
+               register_memory_notifier(&intel_iommu_memory_nb);
+-      down_read(&dmar_global_lock);
+       if (probe_acpi_namespace_devices())
+               pr_warn("ACPI name space devices didn't probe correctly\n");
+@@ -4144,17 +4123,15 @@ int __init intel_iommu_init(void)
+               iommu_disable_protect_mem_regions(iommu);
+       }
+-      up_read(&dmar_global_lock);
+-
+-      pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+       intel_iommu_enabled = 1;
++      dmar_register_bus_notifier();
++      pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+       return 0;
+ out_free_dmar:
+       intel_iommu_free_dmars();
+-      up_write(&dmar_global_lock);
+       return ret;
+ }
+diff --git a/include/linux/dmar.h b/include/linux/dmar.h
+index cbd714a198a0a..f3a3d95df5325 100644
+--- a/include/linux/dmar.h
++++ b/include/linux/dmar.h
+@@ -69,6 +69,7 @@ struct dmar_pci_notify_info {
+ extern struct rw_semaphore dmar_global_lock;
+ extern struct list_head dmar_drhd_units;
++extern int intel_iommu_enabled;
+ #define for_each_drhd_unit(drhd)                                      \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+@@ -92,7 +93,8 @@ extern struct list_head dmar_drhd_units;
+ static inline bool dmar_rcu_check(void)
+ {
+       return rwsem_is_locked(&dmar_global_lock) ||
+-             system_state == SYSTEM_BOOTING;
++             system_state == SYSTEM_BOOTING ||
++             (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
+ }
+ #define       dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
+-- 
+2.35.1
+
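
For illustration only, the sketch below reproduces the problematic pattern in userspace: a thread that already holds the global rwsem for writing re-enters a path that wants it for reading. pthread_rwlock_t stands in for dmar_global_lock, the function name is hypothetical, and tryrdlock is used so the example reports the conflict instead of deadlocking. Build with -pthread.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t global_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Analogue of a callee that would down_read() the global lock. */
static void get_resv_regions(void)
{
	int err = pthread_rwlock_tryrdlock(&global_lock);

	if (err)
		printf("read lock not available: %s\n", strerror(err));
	else
		pthread_rwlock_unlock(&global_lock);
}

int main(void)
{
	pthread_rwlock_wrlock(&global_lock);	/* init path holds it for writing */
	get_resv_regions();			/* recursive acquisition attempt */
	pthread_rwlock_unlock(&global_lock);
	return 0;
}

The patch resolves this not by making the lock recursive but by dropping the dmar_global_lock uses from intel_iommu_init() and deferring DMAR hotplug until the IOMMU subsystem is initialized.
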
diff --git a/queue-5.19/kbuild-disable-header-exports-for-uml-in-a-straightf.patch b/queue-5.19/kbuild-disable-header-exports-for-uml-in-a-straightf.patch
new file mode 100644 (file)
index 0000000..fff2481
--- /dev/null
@@ -0,0 +1,42 @@
+From df0094fb32b88e135a8f1a2207f2f5444e58df55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 10:12:52 +0900
+Subject: kbuild: disable header exports for UML in a straightforward way
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 1b620d539ccc18a1aca1613d9ff078115a7891a1 ]
+
+Previously 'make ARCH=um headers' stopped because of missing
+arch/um/include/uapi/asm/Kbuild.
+
+The error is not shown since commit ed102bf2afed ("um: Fix W=1
+missing-include-dirs warnings") added arch/um/include/uapi/asm/Kbuild.
+
+Hard-code the unsupported architecture, so it works like before.
+
+Fixes: ed102bf2afed ("um: Fix W=1 missing-include-dirs warnings")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Acked-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index e361c6230e9e5..2acd87dd62591 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1286,8 +1286,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+ PHONY += headers
+ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
+-      $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
+-        $(error Headers not exportable for the $(SRCARCH) architecture))
++      $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML))
+       $(Q)$(MAKE) $(hdr-inst)=include/uapi
+       $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
+-- 
+2.35.1
+
diff --git a/queue-5.19/mips-loongson32-ls1c-fix-hang-during-startup.patch b/queue-5.19/mips-loongson32-ls1c-fix-hang-during-startup.patch
new file mode 100644 (file)
index 0000000..18ce2be
--- /dev/null
@@ -0,0 +1,37 @@
+From a89c3bce9e848673f5c1c5cedb87f414a4fa574a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 19:17:25 +0800
+Subject: MIPS: loongson32: ls1c: Fix hang during startup
+
+From: Yang Ling <gnaygnil@gmail.com>
+
+[ Upstream commit 35508d2424097f9b6a1a17aac94f702767035616 ]
+
+The RTCCTRL reg of LS1C is obselete.
+Writing this reg will cause system hang.
+
+Fixes: 60219c563c9b6 ("MIPS: Add RTC support for Loongson1C board")
+Signed-off-by: Yang Ling <gnaygnil@gmail.com>
+Tested-by: Keguang Zhang <keguang.zhang@gmail.com>
+Acked-by: Keguang Zhang <keguang.zhang@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/loongson32/ls1c/board.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
+index e9de6da0ce51f..9dcfe9de55b0a 100644
+--- a/arch/mips/loongson32/ls1c/board.c
++++ b/arch/mips/loongson32/ls1c/board.c
+@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
+ static int __init ls1c_platform_init(void)
+ {
+       ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+-      ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
+       return platform_add_devices(ls1c_platform_devices,
+                                  ARRAY_SIZE(ls1c_platform_devices));
+-- 
+2.35.1
+
diff --git a/queue-5.19/net-bonding-replace-dev_trans_start-with-the-jiffies.patch b/queue-5.19/net-bonding-replace-dev_trans_start-with-the-jiffies.patch
new file mode 100644 (file)
index 0000000..a53504d
--- /dev/null
@@ -0,0 +1,192 @@
+From 95bb8ad2b255fade1889123657c94c9d8044c57f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 31 Jul 2022 15:41:05 +0300
+Subject: net: bonding: replace dev_trans_start() with the jiffies of the last
+ ARP/NS
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 06799a9085e12a778fe2851db550ab5911ad28fe ]
+
+The bonding driver piggybacks on time stamps kept by the network stack
+for the purpose of the netdev TX watchdog, and this is problematic
+because it does not work with NETIF_F_LLTX devices.
+
+It is hard to say why the driver looks at dev_trans_start() of the
+slave->dev, considering that this is updated even by non-ARP/NS probes
+sent by us, and even by traffic not sent by us at all (for example PTP
+on physical slave devices). ARP monitoring in active-backup mode appears
+to still work even if we track only the last TX time of actual ARP
+probes.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 592335a4164c ("bonding: accept unsolicited NA message")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 35 +++++++++++++++++++--------------
+ include/net/bonding.h           | 13 +++++++++++-
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 0cf8c3a125d2e..01b58b7e7f165 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1974,6 +1974,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+       for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+               new_slave->target_last_arp_rx[i] = new_slave->last_rx;
++      new_slave->last_tx = new_slave->last_rx;
++
+       if (bond->params.miimon && !bond->params.use_carrier) {
+               link_reporting = bond_check_dev_link(bond, slave_dev, 1);
+@@ -2857,8 +2859,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+               return;
+       }
+-      if (bond_handle_vlan(slave, tags, skb))
++      if (bond_handle_vlan(slave, tags, skb)) {
++              slave_update_last_tx(slave);
+               arp_xmit(skb);
++      }
++
+       return;
+ }
+@@ -3047,8 +3052,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+                           curr_active_slave->last_link_up))
+               bond_validate_arp(bond, slave, tip, sip);
+       else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
+-               bond_time_in_interval(bond,
+-                                     dev_trans_start(curr_arp_slave->dev), 1))
++               bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+               bond_validate_arp(bond, slave, sip, tip);
+ out_unlock:
+@@ -3076,8 +3080,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+       }
+       addrconf_addr_solict_mult(daddr, &mcaddr);
+-      if (bond_handle_vlan(slave, tags, skb))
++      if (bond_handle_vlan(slave, tags, skb)) {
++              slave_update_last_tx(slave);
+               ndisc_send_skb(skb, &mcaddr, saddr);
++      }
+ }
+ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+@@ -3222,8 +3228,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+                           curr_active_slave->last_link_up))
+               bond_validate_ns(bond, slave, saddr, daddr);
+       else if (curr_arp_slave &&
+-               bond_time_in_interval(bond,
+-                                     dev_trans_start(curr_arp_slave->dev), 1))
++               bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+               bond_validate_ns(bond, slave, saddr, daddr);
+ out:
+@@ -3311,12 +3316,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
+        *       so it can wait
+        */
+       bond_for_each_slave_rcu(bond, slave, iter) {
+-              unsigned long trans_start = dev_trans_start(slave->dev);
++              unsigned long last_tx = slave_last_tx(slave);
+               bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+               if (slave->link != BOND_LINK_UP) {
+-                      if (bond_time_in_interval(bond, trans_start, 1) &&
++                      if (bond_time_in_interval(bond, last_tx, 1) &&
+                           bond_time_in_interval(bond, slave->last_rx, 1)) {
+                               bond_propose_link_state(slave, BOND_LINK_UP);
+@@ -3341,7 +3346,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
+                        * when the source ip is 0, so don't take the link down
+                        * if we don't know our ip yet
+                        */
+-                      if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
++                      if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+                           !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
+                               bond_propose_link_state(slave, BOND_LINK_DOWN);
+@@ -3407,7 +3412,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
+  */
+ static int bond_ab_arp_inspect(struct bonding *bond)
+ {
+-      unsigned long trans_start, last_rx;
++      unsigned long last_tx, last_rx;
+       struct list_head *iter;
+       struct slave *slave;
+       int commit = 0;
+@@ -3458,9 +3463,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+                * - (more than missed_max*delta since receive AND
+                *    the bond has an IP address)
+                */
+-              trans_start = dev_trans_start(slave->dev);
++              last_tx = slave_last_tx(slave);
+               if (bond_is_active_slave(slave) &&
+-                  (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
++                  (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+                    !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
+                       bond_propose_link_state(slave, BOND_LINK_DOWN);
+                       commit++;
+@@ -3477,8 +3482,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+  */
+ static void bond_ab_arp_commit(struct bonding *bond)
+ {
+-      unsigned long trans_start;
+       struct list_head *iter;
++      unsigned long last_tx;
+       struct slave *slave;
+       bond_for_each_slave(bond, slave, iter) {
+@@ -3487,10 +3492,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
+                       continue;
+               case BOND_LINK_UP:
+-                      trans_start = dev_trans_start(slave->dev);
++                      last_tx = slave_last_tx(slave);
+                       if (rtnl_dereference(bond->curr_active_slave) != slave ||
+                           (!rtnl_dereference(bond->curr_active_slave) &&
+-                           bond_time_in_interval(bond, trans_start, 1))) {
++                           bond_time_in_interval(bond, last_tx, 1))) {
+                               struct slave *current_arp_slave;
+                               current_arp_slave = rtnl_dereference(bond->current_arp_slave);
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index cb904d356e31e..3b816ae8b1f3b 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -161,8 +161,9 @@ struct slave {
+       struct net_device *dev; /* first - useful for panic debug */
+       struct bonding *bond; /* our master */
+       int    delay;
+-      /* all three in jiffies */
++      /* all 4 in jiffies */
+       unsigned long last_link_up;
++      unsigned long last_tx;
+       unsigned long last_rx;
+       unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+       s8     link;            /* one of BOND_LINK_XXXX */
+@@ -539,6 +540,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
+       return slave->last_rx;
+ }
++static inline void slave_update_last_tx(struct slave *slave)
++{
++      WRITE_ONCE(slave->last_tx, jiffies);
++}
++
++static inline unsigned long slave_last_tx(struct slave *slave)
++{
++      return READ_ONCE(slave->last_tx);
++}
++
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
+                                        struct sk_buff *skb)
+-- 
+2.35.1
+
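
The new slave_update_last_tx()/slave_last_tx() helpers are a lockless timestamp: the ARP/NS transmit paths stamp it and the monitor work reads it. The userspace sketch below mirrors that idea with C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE() and time() standing in for jiffies; the names and the two-second window are illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic time_t last_tx;

/* e.g. called right after arp_xmit() / ndisc_send_skb() */
static void probe_sent(void)
{
	atomic_store_explicit(&last_tx, time(NULL), memory_order_relaxed);
}

/* monitor-side check: was a probe sent within the last 'window' seconds? */
static int active_within(time_t window)
{
	time_t t = atomic_load_explicit(&last_tx, memory_order_relaxed);

	return (time(NULL) - t) <= window;
}

int main(void)
{
	probe_sent();
	printf("slave active in last 2s: %d\n", active_within(2));
	return 0;
}

Tracking only our own probe transmissions avoids the two problems named above: dev_trans_start() is not updated by NETIF_F_LLTX devices, and it is updated by traffic that has nothing to do with ARP/NS monitoring.
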
diff --git a/queue-5.19/net-dsa-felix-tc-taprio-intervals-smaller-than-mtu-s.patch b/queue-5.19/net-dsa-felix-tc-taprio-intervals-smaller-than-mtu-s.patch
new file mode 100644 (file)
index 0000000..15ef146
--- /dev/null
@@ -0,0 +1,154 @@
+From 5af06f99ba016e636cb79ac05266dc05813f0175 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Sep 2022 20:01:23 +0300
+Subject: net: dsa: felix: tc-taprio intervals smaller than MTU should send at
+ least one packet
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 11afdc6526de0e0368c05da632a8c0d29fc60bb8 ]
+
+The blamed commit broke tc-taprio schedules such as this one:
+
+tc qdisc replace dev $swp1 root taprio \
+        num_tc 8 \
+        map 0 1 2 3 4 5 6 7 \
+        queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+        base-time 0 \
+        sched-entry S 0x7f 990000 \
+        sched-entry S 0x80  10000 \
+        flags 0x2
+
+because the gate entry for TC 7 (S 0x80 10000 ns) now has a static guard
+band added earlier than its 'gate close' event, such that packet
+overruns won't occur in the worst case of the largest packet possible.
+
+Since guard bands are statically determined based on the per-tc
+QSYS_QMAXSDU_CFG_* with a fallback on the port-based QSYS_PORT_MAX_SDU,
+we need to discuss what happens with TC 7 depending on kernel version,
+since the driver, prior to commit 55a515b1f5a9 ("net: dsa: felix: drop
+oversized frames with tc-taprio instead of hanging the port"), did not
+touch QSYS_QMAXSDU_CFG_*, and therefore relied on QSYS_PORT_MAX_SDU.
+
+1 (before vsc9959_tas_guard_bands_update): QSYS_PORT_MAX_SDU defaults to
+  1518, and at gigabit this introduces a static guard band (independent
+  of packet sizes) of 12144 ns, plus QSYS::HSCH_MISC_CFG.FRM_ADJ (bit
+  time of 20 octets => 160 ns). But this is larger than the time window
+  itself, of 10000 ns. So, the queue system never considers a frame with
+  TC 7 as eligible for transmission, since the gate practically never
+  opens, and these frames are forever stuck in the TX queues and hang
+  the port.
+
+2 (after vsc9959_tas_guard_bands_update): Under the sole goal of
+  enabling oversized frame dropping, we make an effort to set
+  QSYS_QMAXSDU_CFG_7 to 1230 bytes. But QSYS_QMAXSDU_CFG_7 plays
+  one more role, which we did not take into account: per-tc static guard
+  band, expressed in L2 byte time (auto-adjusted for FCS and L1 overhead).
+  There is a discrepancy between what the driver thinks (that there is
+  no guard band, and 100% of min_gate_len[tc] is available for egress
+  scheduling) and what the hardware actually does (crops the equivalent
+  of QSYS_QMAXSDU_CFG_7 ns out of min_gate_len[tc]). In practice, this
+  means that the hardware thinks it has exactly 0 ns for scheduling tc 7.
+
+In both cases, even minimum sized Ethernet frames are stuck on egress
+rather than being considered for scheduling on TC 7, even if they would
+fit given a proper configuration. Considering the current situation,
+with vsc9959_tas_guard_bands_update(), frames between 60 octets and 1230
+octets in size are not eligible for oversized dropping (because they are
+smaller than QSYS_QMAXSDU_CFG_7), but won't be considered as eligible
+for scheduling either, because the min_gate_len[7] (10000 ns) minus the
+guard band determined by QSYS_QMAXSDU_CFG_7 (1230 octets * 8 ns per
+octet == 9840 ns) minus the guard band auto-added for L1 overhead by
+QSYS::HSCH_MISC_CFG.FRM_ADJ (20 octets * 8 ns per octet == 160 octets)
+leaves 0 ns for scheduling in the queue system proper.
+
+Investigating the hardware behavior, it becomes apparent that the queue
+system needs precisely 33 ns of 'gate open' time in order to consider a
+frame as eligible for scheduling to a tc. So the solution to this
+problem is to amend vsc9959_tas_guard_bands_update(), by giving the
+per-tc guard bands less space by exactly 33 ns, just enough for one
+frame to be scheduled in that interval. This allows the queue system to
+make forward progress for that port-tc, and prevents it from hanging.
+
+Fixes: 297c4de6f780 ("net: dsa: felix: re-enable TAS guard band mode")
+Reported-by: Xiaoliang Yang <xiaoliang.yang_1@nxp.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/ocelot/felix_vsc9959.c | 35 +++++++++++++++++++++++---
+ 1 file changed, 31 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 4cce71243080e..517bc3922ee24 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -22,6 +22,7 @@
+ #define VSC9959_NUM_PORTS             6
+ #define VSC9959_TAS_GCL_ENTRY_MAX     63
++#define VSC9959_TAS_MIN_GATE_LEN_NS   33
+ #define VSC9959_VCAP_POLICER_BASE     63
+ #define VSC9959_VCAP_POLICER_MAX      383
+ #define VSC9959_SWITCH_PCI_BAR                4
+@@ -1411,6 +1412,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+       mdiobus_free(felix->imdio);
+ }
++/* The switch considers any frame (regardless of size) as eligible for
++ * transmission if the traffic class gate is open for at least 33 ns.
++ * Overruns are prevented by cropping an interval at the end of the gate time
++ * slot for which egress scheduling is blocked, but we need to still keep 33 ns
++ * available for one packet to be transmitted, otherwise the port tc will hang.
++ * This function returns the size of a gate interval that remains available for
++ * setting the guard band, after reserving the space for one egress frame.
++ */
++static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
++{
++      /* Gate always open */
++      if (gate_len_ns == U64_MAX)
++              return U64_MAX;
++
++      return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
++}
++
+ /* Extract shortest continuous gate open intervals in ns for each traffic class
+  * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+  * considered U64_MAX. If the gate is always closed, it is considered 0.
+@@ -1590,10 +1608,13 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+       mutex_lock(&ocelot->fwd_domain_lock);
+       for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
++              u64 remaining_gate_len_ps;
+               u32 max_sdu;
+-              if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
+-                  min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
++              remaining_gate_len_ps =
++                      vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
++
++              if (remaining_gate_len_ps > needed_bit_time_ps) {
+                       /* Setting QMAXSDU_CFG to 0 disables oversized frame
+                        * dropping.
+                        */
+@@ -1606,9 +1627,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+                       /* If traffic class doesn't support a full MTU sized
+                        * frame, make sure to enable oversize frame dropping
+                        * for frames larger than the smallest that would fit.
++                       *
++                       * However, the exact same register, QSYS_QMAXSDU_CFG_*,
++                       * controls not only oversized frame dropping, but also
++                       * per-tc static guard band lengths, so it reduces the
++                       * useful gate interval length. Therefore, be careful
++                       * to calculate a guard band (and therefore max_sdu)
++                       * that still leaves 33 ns available in the time slot.
+                        */
+-                      max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
+-                                        picos_per_byte);
++                      max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+                       /* A TC gate may be completely closed, which is a
+                        * special case where all packets are oversized.
+                        * Any limit smaller than 64 octets accomplishes this
+-- 
+2.35.1
+
diff --git a/queue-5.19/perf-evlist-always-use-arch_evlist__add_default_attr.patch b/queue-5.19/perf-evlist-always-use-arch_evlist__add_default_attr.patch
new file mode 100644 (file)
index 0000000..0cc3fe3
--- /dev/null
@@ -0,0 +1,131 @@
+From 736310b703941ea3f062ff8c1b696f9b756100e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Jul 2022 14:57:04 +0800
+Subject: perf evlist: Always use arch_evlist__add_default_attrs()
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit a9c1ecdabc4f2ef04ef5334b8deb3a5c5910136d ]
+
+Current perf stat uses the evlist__add_default_attrs() to add the
+generic default attrs, and uses arch_evlist__add_default_attrs() to add
+the Arch specific default attrs, e.g., Topdown for x86.
+
+It works well for non-hybrid platforms. However, for a hybrid
+platform, the hard-coded generic default attrs don't work.
+
+Use arch_evlist__add_default_attrs() to replace
+evlist__add_default_attrs(). arch_evlist__add_default_attrs() is
+modified to invoke the same __evlist__add_default_attrs() for the
+generic default attrs. No functional change.
+
+Add default_null_attrs[] to indicate the arch-specific attrs.
+No functional change for the arch-specific default attrs either.
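+
+A rough standalone sketch of the resulting pattern (the names are only
+illustrative, not the real perf code; the empty array relies on the same
+GNU extension as default_null_attrs[], so build it with gcc):
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct attr { int config; };
+
+  /* Generic helper, in the spirit of __evlist__add_default_attrs() */
+  static int add_attrs(const struct attr *attrs, size_t n)
+  {
+      printf("adding %zu generic attr(s), first config=%d\n",
+             n, n ? attrs[0].config : -1);
+      return 0;
+  }
+
+  /* Weak arch hook: a non-empty array is forwarded to the generic
+   * helper; an empty array means "add arch-specific events", and the
+   * non-overridden default has none to add. */
+  __attribute__((weak))
+  int arch_add_default_attrs(const struct attr *attrs, size_t n)
+  {
+      return n ? add_attrs(attrs, n) : 0;
+  }
+
+  #define add_default_attrs(array) \
+      arch_add_default_attrs(array, sizeof(array) / sizeof((array)[0]))
+
+  int main(void)
+  {
+      struct attr generic[] = { { 1 }, { 2 } };
+      struct attr arch_only[] = {};  /* models default_null_attrs[] */
+
+      add_default_attrs(generic);    /* generic path */
+      add_default_attrs(arch_only);  /* arch hook decides what to add */
+      return 0;
+  }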
+
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220721065706.2886112-4-zhengjun.xing@linux.intel.com
+Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: f0c86a2bae4f ("perf stat: Fix L2 Topdown metrics disappear for raw events")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/arch/x86/util/evlist.c | 7 ++++++-
+ tools/perf/builtin-stat.c         | 6 +++++-
+ tools/perf/util/evlist.c          | 9 +++++++--
+ tools/perf/util/evlist.h          | 7 +++++--
+ 4 files changed, 23 insertions(+), 6 deletions(-)
+
+diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
+index 68f681ad54c1e..777bdf182a582 100644
+--- a/tools/perf/arch/x86/util/evlist.c
++++ b/tools/perf/arch/x86/util/evlist.c
+@@ -8,8 +8,13 @@
+ #define TOPDOWN_L1_EVENTS     "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
+ #define TOPDOWN_L2_EVENTS     "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
+-int arch_evlist__add_default_attrs(struct evlist *evlist)
++int arch_evlist__add_default_attrs(struct evlist *evlist,
++                                 struct perf_event_attr *attrs,
++                                 size_t nr_attrs)
+ {
++      if (nr_attrs)
++              return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
++
+       if (!pmu_have_event("cpu", "slots"))
+               return 0;
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 5f0333a8acd8a..5aacb7ed8c24a 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1778,6 +1778,9 @@ static int add_default_attributes(void)
+       (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
+       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
+ };
++
++      struct perf_event_attr default_null_attrs[] = {};
++
+       /* Set attrs if no event is selected and !null_run: */
+       if (stat_config.null_run)
+               return 0;
+@@ -1959,7 +1962,8 @@ static int add_default_attributes(void)
+                       return -1;
+               stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
+-              if (arch_evlist__add_default_attrs(evsel_list) < 0)
++              /* Platform specific attrs */
++              if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
+                       return -1;
+       }
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 48af7d379d822..efa5f006b5c61 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -342,9 +342,14 @@ int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *a
+       return evlist__add_attrs(evlist, attrs, nr_attrs);
+ }
+-__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
++__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
++                                        struct perf_event_attr *attrs,
++                                        size_t nr_attrs)
+ {
+-      return 0;
++      if (!nr_attrs)
++              return 0;
++
++      return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
+ }
+ struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index 1bde9ccf4e7da..129095c0fe6d3 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -107,10 +107,13 @@ static inline int evlist__add_default(struct evlist *evlist)
+ int __evlist__add_default_attrs(struct evlist *evlist,
+                                    struct perf_event_attr *attrs, size_t nr_attrs);
++int arch_evlist__add_default_attrs(struct evlist *evlist,
++                                 struct perf_event_attr *attrs,
++                                 size_t nr_attrs);
++
+ #define evlist__add_default_attrs(evlist, array) \
+-      __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
++      arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+-int arch_evlist__add_default_attrs(struct evlist *evlist);
+ struct evsel *arch_evlist__leader(struct list_head *list);
+ int evlist__add_dummy(struct evlist *evlist);
+-- 
+2.35.1
+
diff --git a/queue-5.19/perf-stat-fix-l2-topdown-metrics-disappear-for-raw-e.patch b/queue-5.19/perf-stat-fix-l2-topdown-metrics-disappear-for-raw-e.patch
new file mode 100644 (file)
index 0000000..473e90b
--- /dev/null
@@ -0,0 +1,128 @@
+From 49a62d1cb2e890f5a95c91f27971eb2bc014b7df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 22:00:57 +0800
+Subject: perf stat: Fix L2 Topdown metrics disappear for raw events
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhengjun Xing <zhengjun.xing@linux.intel.com>
+
+[ Upstream commit f0c86a2bae4fd12bfa8bad4d43fb59fb498cdd14 ]
+
+In perf/Documentation/perf-stat.txt, for "--td-level" the default "0"
+means the max level that the current hardware supports.
+
+So we need to initialize stat_config.topdown_level to TOPDOWN_MAX_LEVEL
+when “--td-level=0” is given or the “--td-level” option is absent.
+Otherwise, on hardware whose max level is 2, the 2nd-level metrics
+disappear for raw events.
+
+The issue cannot be observed for the perf stat default or "--topdown"
+options. This commit fixes the raw events issue and removes the
+duplicated code for the perf stat default.
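+
+The defaulting rule itself is small; a standalone sketch of the idea
+(the TOPDOWN_MAX_LEVEL value here is illustrative, the real one depends
+on the hardware):
+
+  #include <stdio.h>
+
+  #define TOPDOWN_MAX_LEVEL 2  /* illustrative */
+
+  int main(void)
+  {
+      int td_level = 0;  /* 0 == "deepest level the hardware supports" */
+
+      /* Resolve the "0 means max" convention once, before any path
+       * (including the raw-event one) compares against the level. */
+      if (!td_level)
+          td_level = TOPDOWN_MAX_LEVEL;
+
+      printf("effective topdown level: %d\n", td_level);
+      return 0;
+  }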
+
+Before:
+
+ # ./perf stat -e "cpu-clock,context-switches,cpu-migrations,page-faults,instructions,cycles,ref-cycles,branches,branch-misses,{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}" sleep 1
+
+ Performance counter stats for 'sleep 1':
+
+              1.03 msec cpu-clock                        #    0.001 CPUs utilized
+                 1      context-switches                 #  966.216 /sec
+                 0      cpu-migrations                   #    0.000 /sec
+                60      page-faults                      #   57.973 K/sec
+         1,132,112      instructions                     #    1.41  insn per cycle
+           803,872      cycles                           #    0.777 GHz
+         1,909,120      ref-cycles                       #    1.845 G/sec
+           236,634      branches                         #  228.640 M/sec
+             6,367      branch-misses                    #    2.69% of all branches
+         4,823,232      slots                            #    4.660 G/sec
+         1,210,536      topdown-retiring                 #     25.1% Retiring
+           699,841      topdown-bad-spec                 #     14.5% Bad Speculation
+         1,777,975      topdown-fe-bound                 #     36.9% Frontend Bound
+         1,134,878      topdown-be-bound                 #     23.5% Backend Bound
+           189,146      topdown-heavy-ops                #  182.756 M/sec
+           662,012      topdown-br-mispredict            #  639.647 M/sec
+         1,097,048      topdown-fetch-lat                #    1.060 G/sec
+           416,121      topdown-mem-bound                #  402.063 M/sec
+
+       1.002423690 seconds time elapsed
+
+       0.002494000 seconds user
+       0.000000000 seconds sys
+
+After:
+
+ # ./perf stat -e "cpu-clock,context-switches,cpu-migrations,page-faults,instructions,cycles,ref-cycles,branches,branch-misses,{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}" sleep 1
+
+ Performance counter stats for 'sleep 1':
+
+              1.13 msec cpu-clock                        #    0.001 CPUs utilized
+                 1      context-switches                 #  882.128 /sec
+                 0      cpu-migrations                   #    0.000 /sec
+                61      page-faults                      #   53.810 K/sec
+         1,137,612      instructions                     #    1.29  insn per cycle
+           881,477      cycles                           #    0.778 GHz
+         2,093,496      ref-cycles                       #    1.847 G/sec
+           236,356      branches                         #  208.496 M/sec
+             7,090      branch-misses                    #    3.00% of all branches
+         5,288,862      slots                            #    4.665 G/sec
+         1,223,697      topdown-retiring                 #     23.1% Retiring
+           767,403      topdown-bad-spec                 #     14.5% Bad Speculation
+         2,053,322      topdown-fe-bound                 #     38.8% Frontend Bound
+         1,244,438      topdown-be-bound                 #     23.5% Backend Bound
+           186,665      topdown-heavy-ops                #      3.5% Heavy Operations       #     19.6% Light Operations
+           725,922      topdown-br-mispredict            #     13.7% Branch Mispredict      #      0.8% Machine Clears
+         1,327,400      topdown-fetch-lat                #     25.1% Fetch Latency          #     13.7% Fetch Bandwidth
+           497,775      topdown-mem-bound                #      9.4% Memory Bound           #     14.1% Core Bound
+
+       1.002701530 seconds time elapsed
+
+       0.002744000 seconds user
+       0.000000000 seconds sys
+
+Fixes: 63e39aa6ae103451 ("perf stat: Support L2 Topdown events")
+Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220826140057.3289401-1-zhengjun.xing@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 5aacb7ed8c24a..82e14faecc3e4 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1944,6 +1944,9 @@ static int add_default_attributes(void)
+               free(str);
+       }
++      if (!stat_config.topdown_level)
++              stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
++
+       if (!evsel_list->core.nr_entries) {
+               if (target__has_cpu(&target))
+                       default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+@@ -1960,8 +1963,6 @@ static int add_default_attributes(void)
+               }
+               if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
+                       return -1;
+-
+-              stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
+               /* Platform specific attrs */
+               if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
+                       return -1;
+-- 
+2.35.1
+
diff --git a/queue-5.19/s390-boot-fix-absolute-zero-lowcore-corruption-on-bo.patch b/queue-5.19/s390-boot-fix-absolute-zero-lowcore-corruption-on-bo.patch
new file mode 100644 (file)
index 0000000..2c611d7
--- /dev/null
@@ -0,0 +1,54 @@
+From 8c4ad1cb49701ead3f09f1dcdfa930a438c7a3dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Aug 2022 19:45:21 +0200
+Subject: s390/boot: fix absolute zero lowcore corruption on boot
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+[ Upstream commit 12dd19c159659ec9050f45dc8a2ff3c3917f4be3 ]
+
+Crash dump always starts on CPU0. In case CPU0 is offline, the
+prefix page is not installed and the absolute zero lowcore is
+used. However, struct lowcore::mcesad is never assigned and
+stays zero. As a result, the __machine_kdump() -> save_vx_regs()
+call silently stores the vector registers to the absolute lowcore
+at offset 0x11b0.
+
+Fixes: a62bc0739253 ("s390/kdump: add support for vector extension")
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/nmi.c   | 2 +-
+ arch/s390/kernel/setup.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
+index 53ed3884fe644..5d66e3947070c 100644
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -63,7 +63,7 @@ static inline unsigned long nmi_get_mcesa_size(void)
+  * structure. The structure is required for machine check happening
+  * early in the boot process.
+  */
+-static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
++static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
+ void __init nmi_alloc_mcesa_early(u64 *mcesad)
+ {
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 0a37f5de28631..3e0361db963ef 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -486,6 +486,7 @@ static void __init setup_lowcore_dat_off(void)
+       put_abs_lowcore(restart_data, lc->restart_data);
+       put_abs_lowcore(restart_source, lc->restart_source);
+       put_abs_lowcore(restart_psw, lc->restart_psw);
++      put_abs_lowcore(mcesad, lc->mcesad);
+       lc->spinlock_lockval = arch_spin_lockval(0);
+       lc->spinlock_index = 0;
+-- 
+2.35.1
+
index ae7622e06a5cb12f64a473413e947cd51002c645..b8eae9765d71b34e4297879c145c2b4958c93bcd 100644 (file)
@@ -159,3 +159,22 @@ asoc-mchp-spdiftx-fix-clang-wbitfield-constant-conversion.patch
 lsm-io_uring-add-lsm-hooks-for-the-new-uring_cmd-file-op.patch
 selinux-implement-the-security_uring_cmd-lsm-hook.patch
 smack-provide-read-control-for-io_uring_cmd.patch
+mips-loongson32-ls1c-fix-hang-during-startup.patch
+kbuild-disable-header-exports-for-uml-in-a-straightf.patch
+i40e-refactor-tc-mqprio-checks.patch
+i40e-fix-adq-rate-limiting-for-pf.patch
+net-bonding-replace-dev_trans_start-with-the-jiffies.patch
+bonding-accept-unsolicited-na-message.patch
+swiotlb-avoid-potential-left-shift-overflow.patch
+iommu-amd-use-full-64-bit-value-in-build_completion_.patch
+s390-boot-fix-absolute-zero-lowcore-corruption-on-bo.patch
+time64.h-consolidate-uses-of-psec_per_nsec.patch
+net-dsa-felix-tc-taprio-intervals-smaller-than-mtu-s.patch
+hwmon-mr75203-fix-vm-sensor-allocation-when-intel-vm.patch
+hwmon-mr75203-update-pvt-v_num-and-vm_num-to-the-act.patch
+hwmon-mr75203-fix-voltage-equation-for-negative-sour.patch
+hwmon-mr75203-fix-multi-channel-voltage-reading.patch
+hwmon-mr75203-enable-polling-for-all-vm-channels.patch
+iommu-vt-d-fix-possible-recursive-locking-in-intel_i.patch
+perf-evlist-always-use-arch_evlist__add_default_attr.patch
+perf-stat-fix-l2-topdown-metrics-disappear-for-raw-e.patch
diff --git a/queue-5.19/swiotlb-avoid-potential-left-shift-overflow.patch b/queue-5.19/swiotlb-avoid-potential-left-shift-overflow.patch
new file mode 100644 (file)
index 0000000..992c8a8
--- /dev/null
@@ -0,0 +1,44 @@
+From 904b9a2795a96304c8bc13542a1ee793d45fb83e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 16:45:37 +0800
+Subject: swiotlb: avoid potential left shift overflow
+
+From: Chao Gao <chao.gao@intel.com>
+
+[ Upstream commit 3f0461613ebcdc8c4073e235053d06d5aa58750f ]
+
+The second operand passed to slot_addr() is declared as int or unsigned int
+at all call sites. The left shift used to compute the offset of a slot can
+overflow if the swiotlb size is larger than 4G.
+
+Convert the macro to an inline function and declare the second argument as
+phys_addr_t to avoid the potential overflow.
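+
+To see the wrap-around outside the kernel, here is a minimal sketch
+(IO_TLB_SHIFT matches the kernel's 2 KiB slot size; the pool start
+address and slot index are made up, chosen so the slot offset exceeds
+4 GiB):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define IO_TLB_SHIFT 11  /* 2 KiB slots */
+
+  int main(void)
+  {
+      unsigned int idx = 3u << 20;      /* slot ~6 GiB into the pool */
+      uint64_t start = 0x100000000ULL;  /* hypothetical pool start */
+
+      /* Old macro: the shift happens in 32-bit arithmetic and wraps. */
+      uint64_t wrong = start + (idx << IO_TLB_SHIFT);
+
+      /* Fix: widen the index first, as the inline function does by
+       * taking a phys_addr_t argument. */
+      uint64_t right = start + ((uint64_t)idx << IO_TLB_SHIFT);
+
+      printf("wrapped: %#llx\ncorrect: %#llx\n",
+             (unsigned long long)wrong, (unsigned long long)right);
+      return 0;
+  }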
+
+Fixes: 26a7e094783d ("swiotlb: refactor swiotlb_tbl_map_single")
+Signed-off-by: Chao Gao <chao.gao@intel.com>
+Reviewed-by: Dongli Zhang <dongli.zhang@oracle.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 5830dce6081b3..ce34d50f7a9bb 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -464,7 +464,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
+       }
+ }
+-#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
++static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
++{
++      return start + (idx << IO_TLB_SHIFT);
++}
+ /*
+  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
+-- 
+2.35.1
+
diff --git a/queue-5.19/time64.h-consolidate-uses-of-psec_per_nsec.patch b/queue-5.19/time64.h-consolidate-uses-of-psec_per_nsec.patch
new file mode 100644 (file)
index 0000000..3a1a706
--- /dev/null
@@ -0,0 +1,109 @@
+From 2084301abedad2d0ec05013bd8ff0f2c3875a59a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jun 2022 17:52:38 +0300
+Subject: time64.h: consolidate uses of PSEC_PER_NSEC
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 837ced3a1a5d8bb1a637dd584711f31ae6b54d93 ]
+
+Time-sensitive networking code needs to work with PTP times expressed in
+nanoseconds, and with packet transmission times expressed in picoseconds,
+since the latter would be fractional at speeds above gigabit if expressed
+in nanoseconds.
+
+Convert the existing uses in tc-taprio and the ocelot/felix DSA driver
+to a PSEC_PER_NSEC macro. This macro is placed in include/linux/time64.h
+as opposed to its relatives (PSEC_PER_SEC etc) from include/vdso/time64.h
+because the vDSO library does not (yet) need/use it.
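+
+For illustration, a standalone sketch using the new constant shows why
+byte transmission times need picosecond resolution above gigabit (the
+link speeds are only examples):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define PSEC_PER_NSEC 1000L
+
+  int main(void)
+  {
+      uint64_t speeds_mbps[] = { 1000, 2500, 10000 };
+
+      for (int i = 0; i < 3; i++) {
+          /* One bit lasts 1000/rate ns; a byte is 8 bits, so in ps:
+           * 8 * 1000 * PSEC_PER_NSEC / rate. */
+          uint64_t ps_per_byte =
+              8ULL * 1000 * PSEC_PER_NSEC / speeds_mbps[i];
+
+          printf("%5llu Mbps: %llu ps per byte (%.1f ns)\n",
+                 (unsigned long long)speeds_mbps[i],
+                 (unsigned long long)ps_per_byte,
+                 ps_per_byte / (double)PSEC_PER_NSEC);
+      }
+      return 0;
+  }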
+
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> # for the vDSO parts
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 11afdc6526de ("net: dsa: felix: tc-taprio intervals smaller than MTU should send at least one packet")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/ocelot/felix_vsc9959.c | 5 +++--
+ include/linux/time64.h                 | 3 +++
+ net/sched/sch_taprio.c                 | 5 +++--
+ 3 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index f1767a6b9271c..4cce71243080e 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -16,6 +16,7 @@
+ #include <linux/iopoll.h>
+ #include <linux/mdio.h>
+ #include <linux/pci.h>
++#include <linux/time.h>
+ #include "felix.h"
+ #define VSC9959_NUM_PORTS             6
+@@ -1592,7 +1593,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+               u32 max_sdu;
+               if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
+-                  min_gate_len[tc] * 1000 > needed_bit_time_ps) {
++                  min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+                       /* Setting QMAXSDU_CFG to 0 disables oversized frame
+                        * dropping.
+                        */
+@@ -1606,7 +1607,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+                        * frame, make sure to enable oversize frame dropping
+                        * for frames larger than the smallest that would fit.
+                        */
+-                      max_sdu = div_u64(min_gate_len[tc] * 1000,
++                      max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
+                                         picos_per_byte);
+                       /* A TC gate may be completely closed, which is a
+                        * special case where all packets are oversized.
+diff --git a/include/linux/time64.h b/include/linux/time64.h
+index 81b9686a20799..2fb8232cff1d5 100644
+--- a/include/linux/time64.h
++++ b/include/linux/time64.h
+@@ -20,6 +20,9 @@ struct itimerspec64 {
+       struct timespec64 it_value;
+ };
++/* Parameters used to convert the timespec values: */
++#define PSEC_PER_NSEC                 1000L
++
+ /* Located here for timespec[64]_valid_strict */
+ #define TIME64_MAX                    ((s64)~((u64)1 << 63))
+ #define TIME64_MIN                    (-TIME64_MAX - 1)
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index b9c71a304d399..0b941dd63d268 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
+ #include <linux/rcupdate.h>
++#include <linux/time.h>
+ #include <net/netlink.h>
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+@@ -176,7 +177,7 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched,
+ static int length_to_duration(struct taprio_sched *q, int len)
+ {
+-      return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
++      return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
+ }
+ /* Returns the entry corresponding to next available interval. If
+@@ -551,7 +552,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
+ static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
+ {
+       atomic_set(&entry->budget,
+-                 div64_u64((u64)entry->interval * 1000,
++                 div64_u64((u64)entry->interval * PSEC_PER_NSEC,
+                            atomic64_read(&q->picos_per_byte)));
+ }
+-- 
+2.35.1
+