--- /dev/null
+From d0356ace63c30682e485fe4cd0a84e12f2d95b91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Sep 2024 15:28:39 +0500
+Subject: fou: fix initialization of grc
+
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+
+[ Upstream commit 4c8002277167125078e6b9b90137bdf443ebaa08 ]
+
+The grc must be initialized first. If fou is NULL, "goto out" is
+executed and grc would then be used uninitialized.
+
+Fixes: 7e4196935069 ("fou: Fix null-ptr-deref in GRO.")
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20240906102839.202798-1-usama.anjum@collabora.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/fou.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 135da756dd5a..1d67df4d8ed6 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -334,11 +334,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ struct gro_remcsum grc;
+ u8 proto;
+
++ skb_gro_remcsum_init(&grc);
++
+ if (!fou)
+ goto out;
+
+- skb_gro_remcsum_init(&grc);
+-
+ off = skb_gro_offset(skb);
+ len = off + sizeof(*guehdr);
+
+--
+2.43.0
+
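A minimal standalone sketch of the bug class fixed above: a local that
every exit path consumes must be initialized before the first early
exit, or the "goto out" path reads an uninitialized value. All names
below are illustrative stand-ins, not the kernel code itself.

    #include <stdio.h>

    struct remcsum { int offset; };   /* stand-in for struct gro_remcsum */

    static void remcsum_init(struct remcsum *grc) { grc->offset = -1; }

    static int gue_receive(const void *fou)
    {
            struct remcsum grc;

            remcsum_init(&grc);       /* the fix: initialize before any goto */

            if (!fou)
                    goto out;         /* early exit, as when fou is NULL */

            /* normal processing would update grc here */
    out:
            return grc.offset;        /* always defined once the init is first */
    }

    int main(void)
    {
            printf("%d\n", gue_receive(NULL));
            return 0;
    }
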
--- /dev/null
+From 61c18176fffd7e7b4c9e6dd5241c304816690acd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Sep 2024 11:30:28 +0200
+Subject: hwmon: (pmbus) Conditionally clear individual status bits for pmbus
+ rev >= 1.2
+
+From: Patryk Biel <pbiel7@gmail.com>
+
+[ Upstream commit 20471071f198c8626dbe3951ac9834055b387844 ]
+
+The current implementation of pmbus_show_boolean assumes that all devices
+support a write-back operation on the status register to clear pending
+warnings or faults. Since clearing individual bits in the status registers
+was only introduced in PMBus specification 1.2, this operation may not be
+supported by some older devices. This can result in an error while reading
+boolean attributes such as temp1_max_alarm.
+
+Fetch the PMBus revision supported by the device and modify
+pmbus_show_boolean so that it only tries to clear individual status bits
+if the device is compliant with PMBus spec 1.2 or later. Otherwise, clear
+all fault indicators on the current page after a fault status is reported.
+
+Fixes: 35f165f08950a ("hwmon: (pmbus) Clear pmbus fault/warning bits after read")
+Signed-off-by: Patryk Biel <pbiel7@gmail.com>
+Message-ID: <20240909-pmbus-status-reg-clearing-v1-1-f1c0d68c6408@gmail.com>
+[groeck:
+ Rewrote description
+ Moved revision detection code ahead of clear faults command
+ Assigned revision if return value from PMBUS_REVISION command is 0
+ Improved return value check from calling _pmbus_write_byte_data()]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/pmbus/pmbus.h | 6 ++++++
+ drivers/hwmon/pmbus/pmbus_core.c | 17 ++++++++++++++---
+ 2 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
+index e2a570930bd7..62260553f483 100644
+--- a/drivers/hwmon/pmbus/pmbus.h
++++ b/drivers/hwmon/pmbus/pmbus.h
+@@ -409,6 +409,12 @@ enum pmbus_sensor_classes {
+ enum pmbus_data_format { linear = 0, direct, vid };
+ enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
+
++/* PMBus revision identifiers */
++#define PMBUS_REV_10 0x00 /* PMBus revision 1.0 */
++#define PMBUS_REV_11 0x11 /* PMBus revision 1.1 */
++#define PMBUS_REV_12 0x22 /* PMBus revision 1.2 */
++#define PMBUS_REV_13 0x33 /* PMBus revision 1.3 */
++
+ struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ u8 phases[PMBUS_PAGES]; /* Number of phases per page */
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index cc9ce5b2f0f2..1ef214a8a01b 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -82,6 +82,8 @@ struct pmbus_data {
+
+ u32 flags; /* from platform data */
+
++ u8 revision; /* The PMBus revision the device is compliant with */
++
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
+
+@@ -930,9 +932,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
+
+ regval = status & mask;
+ if (regval) {
+- ret = _pmbus_write_byte_data(client, page, reg, regval);
+- if (ret)
+- goto unlock;
++ if (data->revision >= PMBUS_REV_12) {
++ ret = _pmbus_write_byte_data(client, page, reg, regval);
++ if (ret)
++ goto unlock;
++ } else {
++ pmbus_clear_fault_page(client, page);
++ }
++
+ }
+ if (s1 && s2) {
+ s64 v1, v2;
+@@ -2370,6 +2377,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ }
+
++ ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
++ if (ret >= 0)
++ data->revision = ret;
++
+ if (data->info->pages)
+ pmbus_clear_faults(client);
+ else
+--
+2.43.0
+
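For context: the PMBUS_REVISION byte packs the Part I revision into the
high nibble and the Part II revision into the low nibble, which is why
1.2 compliance is encoded as 0x22 above. A small illustrative user-space
decode (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int rev = 0x22;  /* PMBUS_REV_12, as read back */

            printf("Part I rev 1.%u, Part II rev 1.%u\n",
                   rev >> 4, rev & 0xfu);
            printf("individual status bit clearing: %s\n",
                   rev >= 0x22 ? "supported" : "not guaranteed");
            return 0;
    }
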
--- /dev/null
+From 17e7c2bc6f1c343bdb8bd98bf59e7181524a2c44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Apr 2022 16:40:36 +0200
+Subject: hwmon: (pmbus) Introduce and use write_byte_data callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mårten Lindahl <marten.lindahl@axis.com>
+
+[ Upstream commit 5de3e13f7f6b496bd7bd9ff4d2b915b7d3e67cda ]
+
+Some of the pmbus core functions use pmbus_write_byte_data, which does
+not support driver callbacks for chip-specific write operations. This
+could potentially affect regulator chips that, for example, need a time
+delay before each data access.
+
+Let's add support for a driver callback with _pmbus_write_byte_data.
+
+Signed-off-by: Mårten Lindahl <marten.lindahl@axis.com>
+Link: https://lore.kernel.org/r/20220428144039.2464667-2-marten.lindahl@axis.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Stable-dep-of: 20471071f198 ("hwmon: (pmbus) Conditionally clear individual status bits for pmbus rev >= 1.2")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/pmbus/pmbus.h | 2 ++
+ drivers/hwmon/pmbus/pmbus_core.c | 24 +++++++++++++++++++++---
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
+index ef3a8ecde4df..e2a570930bd7 100644
+--- a/drivers/hwmon/pmbus/pmbus.h
++++ b/drivers/hwmon/pmbus/pmbus.h
+@@ -438,6 +438,8 @@ struct pmbus_driver_info {
+ int (*read_byte_data)(struct i2c_client *client, int page, int reg);
+ int (*read_word_data)(struct i2c_client *client, int page, int phase,
+ int reg);
++ int (*write_byte_data)(struct i2c_client *client, int page, int reg,
++ u8 byte);
+ int (*write_word_data)(struct i2c_client *client, int page, int reg,
+ u16 word);
+ int (*write_byte)(struct i2c_client *client, int page, u8 value);
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 63b616ce3a6e..cc9ce5b2f0f2 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -276,6 +276,24 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
+ return pmbus_write_word_data(client, page, reg, word);
+ }
+
++/*
++ * _pmbus_write_byte_data() is similar to pmbus_write_byte_data(), but checks if
++ * a device specific mapping function exists and calls it if necessary.
++ */
++static int _pmbus_write_byte_data(struct i2c_client *client, int page, int reg, u8 value)
++{
++ struct pmbus_data *data = i2c_get_clientdata(client);
++ const struct pmbus_driver_info *info = data->info;
++ int status;
++
++ if (info->write_byte_data) {
++ status = info->write_byte_data(client, page, reg, value);
++ if (status != -ENODATA)
++ return status;
++ }
++ return pmbus_write_byte_data(client, page, reg, value);
++}
++
+ int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+ {
+@@ -290,7 +308,7 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+- rv = pmbus_write_byte_data(client, page,
++ rv = _pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+@@ -397,7 +415,7 @@ int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
+ tmp = (rv & ~mask) | (value & mask);
+
+ if (tmp != rv)
+- rv = pmbus_write_byte_data(client, page, reg, tmp);
++ rv = _pmbus_write_byte_data(client, page, reg, tmp);
+
+ return rv;
+ }
+@@ -912,7 +930,7 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
+
+ regval = status & mask;
+ if (regval) {
+- ret = pmbus_write_byte_data(client, page, reg, regval);
++ ret = _pmbus_write_byte_data(client, page, reg, regval);
+ if (ret)
+ goto unlock;
+ }
+--
+2.43.0
+
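A hedged sketch of how a chip driver might use the new hook: returning
-ENODATA falls through to the generic pmbus_write_byte_data(), while any
other value short-circuits it. The chip name and delay are hypothetical.

    static int mychip_write_byte_data(struct i2c_client *client, int page,
                                      int reg, u8 value)
    {
            /* hypothetical per-access settle time before the write */
            usleep_range(100, 200);

            return -ENODATA;          /* continue with the generic write */
    }

    static struct pmbus_driver_info mychip_info = {
            .pages = 1,
            .write_byte_data = mychip_write_byte_data,
    };
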
--- /dev/null
+From 994b22f148e56172424867a220476cd478c66178 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 09:55:55 -0700
+Subject: ice: fix accounting for filters shared by multiple VSIs
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit e843cf7b34fe2e0c1afc55e1f3057375c9b77a14 ]
+
+When adding a switch filter (such as a MAC or VLAN filter), it is expected
+that the driver will detect the case where the filter already exists, and
+return -EEXIST. This is used by calling code such as ice_vc_add_mac_addr,
+and ice_vsi_add_vlan to avoid incrementing the accounting fields such as
+vsi->num_vlan or vf->num_mac.
+
+This logic works correctly for the case where only a single VSI has added a
+given switch filter.
+
+When a second VSI adds the same switch filter, the driver converts the
+existing filter from an ICE_FWD_TO_VSI filter into an ICE_FWD_TO_VSI_LIST
+filter. This saves switch resources by ensuring that multiple VSIs can
+reuse the same filter.
+
+The ice_add_update_vsi_list() function is responsible for doing this
+conversion. When first converting a filter from the FWD_TO_VSI into
+FWD_TO_VSI_LIST, it checks if the VSI being added is the same as the
+existing rule's VSI. In such a case it returns -EEXIST.
+
+However, when the switch rule has already been converted to a
+FWD_TO_VSI_LIST, the logic is different. Adding a new VSI in this case just
+requires extending the VSI list entry. The logic for checking if the rule
+already exists in this case returns 0 instead of -EEXIST.
+
+This breaks the accounting logic mentioned above, so the counters for how
+many MAC and VLAN filters exist for a given VF or VSI no longer accurately
+reflect the actual count. This breaks other code which relies on these
+counts.
+
+In typical usage this primarily affects such filters generally shared by
+multiple VSIs such as VLAN 0, or broadcast and multicast MAC addresses.
+
+Fix this by correctly reporting -EEXIST in the case of adding the same VSI
+to a switch rule already converted to ICE_FWD_TO_VSI_LIST.
+
+Fixes: 9daf8208dd4d ("ice: Add support for switch filter programming")
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index deb828e761fa..128c67c6de77 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1286,7 +1286,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+- return 0;
++ return -EEXIST;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+--
+2.43.0
+
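A sketch of the accounting contract this fix restores. The wrapper name
and counter below are illustrative, not the exact ice call chain:

    err = ice_add_mac_filter(vsi, mac);   /* hypothetical wrapper */
    if (!err)
            vf->num_mac++;                /* genuinely new filter: count it */
    else if (err != -EEXIST)
            return err;                   /* real failure */
    /* -EEXIST: filter already present (possibly via a VSI list), so
     * the counter is deliberately left untouched
     */
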
--- /dev/null
+From 13ef28d6f4a0eefa37b1a6b722b539d3957faf56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 09:42:07 +0200
+Subject: igb: Always call igb_xdp_ring_update_tail() under Tx lock
+
+From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+
+[ Upstream commit 27717f8b17c098c4373ddb8fe89e1a1899c7779d ]
+
+Always call igb_xdp_ring_update_tail() under __netif_tx_lock, and add a
+comment and a lockdep assert to document that. This is needed to share
+the same TX ring between the XDP, XSK and slow paths. Furthermore, the
+current XDP implementation is racy on tail updates.
+
+Fixes: 9cbc948b5a20 ("igb: add XDP support")
+Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+[Kurt: Add lockdep assert and fixes tag]
+Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de>
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 420bc34fb8c1..559ddb40347c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -34,6 +34,7 @@
+ #include <linux/bpf_trace.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/etherdevice.h>
++#include <linux/lockdep.h>
+ #ifdef CONFIG_IGB_DCA
+ #include <linux/dca.h>
+ #endif
+@@ -2893,8 +2894,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ }
+ }
+
++/* This function assumes __netif_tx_lock is held by the caller. */
+ static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+ {
++ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
++
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+@@ -2979,11 +2983,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
+ nxmit++;
+ }
+
+- __netif_tx_unlock(nq);
+-
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
++ __netif_tx_unlock(nq);
++
+ return nxmit;
+ }
+
+@@ -8703,12 +8707,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+
+ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ {
++ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+- struct sk_buff *skb = rx_ring->skb;
+- unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
++ struct sk_buff *skb = rx_ring->skb;
++ int cpu = smp_processor_id();
+ unsigned int xdp_xmit = 0;
++ struct netdev_queue *nq;
+ struct xdp_buff xdp;
+ u32 frame_sz = 0;
+ int rx_buf_pgcnt;
+@@ -8835,7 +8841,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
++ nq = txring_txq(tx_ring);
++ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
++ __netif_tx_unlock(nq);
+ }
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+--
+2.43.0
+
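The locking rule this patch enforces, distilled from the hunks above:
every tail bump on a TX ring shared between the XDP and slow paths
happens with the netdev queue lock held, so doorbell writes cannot
interleave.

    struct netdev_queue *nq = txring_txq(tx_ring);

    __netif_tx_lock(nq, smp_processor_id());
    igb_xdp_ring_update_tail(tx_ring);    /* lockdep-asserted above */
    __netif_tx_unlock(nq);
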
--- /dev/null
+From c9a3c1a81923cab61e0da9c507091b80b44e1c76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Sep 2024 10:31:44 -0400
+Subject: net: dpaa: Pad packets to ETH_ZLEN
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit cbd7ec083413c6a2e0c326d49e24ec7d12c7a9e0 ]
+
+When sending packets under 60 bytes, up to three bytes of the buffer
+following the data may be leaked. Avoid this by extending all packets to
+ETH_ZLEN, ensuring nothing is leaked in the padding. This bug can be
+reproduced by running
+
+ $ ping -s 11 destination
+
+Fixes: 9ad1a3749333 ("dpaa_eth: add support for DPAA Ethernet")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20240910143144.1439910-1-sean.anderson@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 119f560b2e65..6fbf4efa0786 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2269,12 +2269,12 @@ static netdev_tx_t
+ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ {
+ const int queue_mapping = skb_get_queue_mapping(skb);
+- bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
++ bool nonlinear;
+ int offset = 0;
+ int err = 0;
+
+@@ -2284,6 +2284,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+
+ qm_fd_clear_fd(&fd);
+
++ /* Packet data is always read as 32-bit words, so zero out any part of
++ * the skb which might be sent if we have to pad the packet
++ */
++ if (__skb_put_padto(skb, ETH_ZLEN, false))
++ goto enomem;
++
++ nonlinear = skb_is_nonlinear(skb);
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+--
+2.43.0
+
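Why "ping -s 11" reproduces the leak, as worked arithmetic alongside the
padding call from the hunk above:

    /* 11 B ICMP payload + 8 B ICMP header + 20 B IPv4 header
     * + 14 B Ethernet header = 53 B < ETH_ZLEN (60 B), so the frame
     * needs padding; 53 rounded up to the next 32-bit word is 56, i.e.
     * up to 3 bytes past the skb data were readable before this fix.
     */
    if (__skb_put_padto(skb, ETH_ZLEN, false))   /* zero-fills the pad */
            goto enomem;
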
--- /dev/null
+From 331ca401b2ef7265d5610e14081e9aa57cf9ef52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Sep 2024 14:28:31 +0800
+Subject: net: ftgmac100: Enable TX interrupt to avoid TX timeout
+
+From: Jacky Chou <jacky_chou@aspeedtech.com>
+
+[ Upstream commit fef2843bb49f414d1523ca007d088071dee0e055 ]
+
+Currently, the driver only enables the RX interrupt to handle RX
+packets and reclaim TX resources. When there is no RX traffic,
+TX resources must wait for an RX interrupt before they are freed.
+This situation triggers the TX timeout watchdog once the MAC
+TX ring has no more resources to transmit packets.
+Therefore, enable the TX interrupt to release TX resources at any time.
+
+When verifying iperf3 over UDP, the network hangs, as shown in
+the log below.
+
+root# iperf3 -c 192.168.100.100 -i1 -t10 -u -b0
+Connecting to host 192.168.100.100, port 5201
+[ 4] local 192.168.100.101 port 35773 connected to 192.168.100.100 port 5201
+[ ID] Interval Transfer Bandwidth Total Datagrams
+[ 4] 0.00-20.42 sec 160 KBytes 64.2 Kbits/sec 20
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+[ 4] 20.42-20.42 sec 0.00 Bytes 0.00 bits/sec 0
+- - - - - - - - - - - - - - - - - - - - - - - - -
+[ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
+[ 4] 0.00-20.42 sec 160 KBytes 64.2 Kbits/sec 0.000 ms 0/20 (0%)
+[ 4] Sent 20 datagrams
+iperf3: error - the server has terminated
+
+In this network topology, the FTGMAC connects directly to a PC.
+Unlike TCP, UDP does not wait for ACKs, so the FTGMAC needs to enable
+the TX interrupt to release TX resources instead of waiting for an RX
+interrupt.
+
+Fixes: 10cbd6407609 ("ftgmac100: Rework NAPI & interrupts handling")
+Signed-off-by: Jacky Chou <jacky_chou@aspeedtech.com>
+Link: https://patch.msgid.link/20240906062831.2243399-1-jacky_chou@aspeedtech.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/faraday/ftgmac100.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index 63b3e02fab16..4968f6f0bdbc 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -84,7 +84,7 @@
+ FTGMAC100_INT_RPKT_BUF)
+
+ /* All the interrupts we care about */
+-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
++#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
+ FTGMAC100_INT_BAD)
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From 4b68365efca33fe0c945524fa7b01f54d23f51bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 13:04:47 -0700
+Subject: net/mlx5: Add IFC bits and enums for flow meter
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit f5d23ee137e51b4e5cd5d263b144d5e6719f6e52 ]
+
+Add/extend structure layouts and defines for flow meter.
+
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Ariel Levkovich <lariel@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Stable-dep-of: 452ef7f86036 ("net/mlx5: Add missing masks and QoS bit masks for scheduling elements")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mlx5/device.h | 1 +
+ include/linux/mlx5/mlx5_ifc.h | 114 ++++++++++++++++++++++++++++++++--
+ 2 files changed, 111 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 3e72133545ca..1bb4945885ce 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -454,6 +454,7 @@ enum {
+
+ MLX5_OPCODE_UMR = 0x25,
+
++ MLX5_OPCODE_ACCESS_ASO = 0x2d,
+ };
+
+ enum {
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 5151573da9b2..e42d6d2d8ecb 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -434,7 +434,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 max_modify_header_actions[0x8];
+ u8 max_ft_level[0x8];
+
+- u8 reserved_at_40[0x20];
++ u8 reserved_at_40[0x6];
++ u8 execute_aso[0x1];
++ u8 reserved_at_47[0x19];
+
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+@@ -889,7 +891,17 @@ struct mlx5_ifc_qos_cap_bits {
+
+ u8 max_tsar_bw_share[0x20];
+
+- u8 reserved_at_100[0x700];
++ u8 reserved_at_100[0x20];
++
++ u8 reserved_at_120[0x3];
++ u8 log_meter_aso_granularity[0x5];
++ u8 reserved_at_128[0x3];
++ u8 log_meter_aso_max_alloc[0x5];
++ u8 reserved_at_130[0x3];
++ u8 log_max_num_meter_aso[0x5];
++ u8 reserved_at_138[0x8];
++
++ u8 reserved_at_140[0x6c0];
+ };
+
+ struct mlx5_ifc_debug_cap_bits {
+@@ -3156,6 +3168,7 @@ enum {
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
++ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
+ };
+
+ enum {
+@@ -3171,6 +3184,38 @@ struct mlx5_ifc_vlan_bits {
+ u8 vid[0xc];
+ };
+
++enum {
++ MLX5_FLOW_METER_COLOR_RED = 0x0,
++ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
++ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
++ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
++};
++
++enum {
++ MLX5_EXE_ASO_FLOW_METER = 0x2,
++};
++
++struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
++ u8 return_reg_id[0x4];
++ u8 aso_type[0x4];
++ u8 reserved_at_8[0x14];
++ u8 action[0x1];
++ u8 init_color[0x2];
++ u8 meter_id[0x1];
++};
++
++union mlx5_ifc_exe_aso_ctrl {
++ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
++};
++
++struct mlx5_ifc_execute_aso_bits {
++ u8 valid[0x1];
++ u8 reserved_at_1[0x7];
++ u8 aso_object_id[0x18];
++
++ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
++};
++
+ struct mlx5_ifc_flow_context_bits {
+ struct mlx5_ifc_vlan_bits push_vlan;
+
+@@ -3202,7 +3247,9 @@ struct mlx5_ifc_flow_context_bits {
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+- u8 reserved_at_1200[0x600];
++ struct mlx5_ifc_execute_aso_bits execute_aso[4];
++
++ u8 reserved_at_1300[0x500];
+
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ };
+@@ -5825,7 +5872,9 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+
+ u8 obj_id[0x20];
+
+- u8 reserved_at_60[0x20];
++ u8 reserved_at_60[0x3];
++ u8 log_obj_range[0x5];
++ u8 reserved_at_68[0x18];
+ };
+
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+@@ -11190,12 +11239,14 @@ enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
++ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
+ };
+
+ enum {
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
++ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ };
+
+ enum {
+@@ -11270,6 +11321,61 @@ struct mlx5_ifc_create_encryption_key_in_bits {
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+ };
+
++enum {
++ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
++ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
++ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
++ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
++};
++
++struct mlx5_ifc_flow_meter_parameters_bits {
++ u8 valid[0x1];
++ u8 bucket_overflow[0x1];
++ u8 start_color[0x2];
++ u8 both_buckets_on_green[0x1];
++ u8 reserved_at_5[0x1];
++ u8 meter_mode[0x2];
++ u8 reserved_at_8[0x18];
++
++ u8 reserved_at_20[0x20];
++
++ u8 reserved_at_40[0x3];
++ u8 cbs_exponent[0x5];
++ u8 cbs_mantissa[0x8];
++ u8 reserved_at_50[0x3];
++ u8 cir_exponent[0x5];
++ u8 cir_mantissa[0x8];
++
++ u8 reserved_at_60[0x20];
++
++ u8 reserved_at_80[0x3];
++ u8 ebs_exponent[0x5];
++ u8 ebs_mantissa[0x8];
++ u8 reserved_at_90[0x3];
++ u8 eir_exponent[0x5];
++ u8 eir_mantissa[0x8];
++
++ u8 reserved_at_a0[0x60];
++};
++
++struct mlx5_ifc_flow_meter_aso_obj_bits {
++ u8 modify_field_select[0x40];
++
++ u8 reserved_at_40[0x40];
++
++ u8 reserved_at_80[0x8];
++ u8 meter_aso_access_pd[0x18];
++
++ u8 reserved_at_a0[0x160];
++
++ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
++};
++
++struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
++ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
++ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
++};
++
+ struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+--
+2.43.0
+
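A note on the mlx5_ifc convention above: "u8 name[0xN]" declares an
N-bit-wide big-endian field that is accessed through MLX5_SET() and
MLX5_GET(), never directly. A hedged sketch of filling the new meter
parameters (the values are arbitrary examples):

    u32 params[MLX5_ST_SZ_DW(flow_meter_parameters)] = {};

    MLX5_SET(flow_meter_parameters, params, valid, 1);
    MLX5_SET(flow_meter_parameters, params, meter_mode,
             MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH);
    MLX5_SET(flow_meter_parameters, params, cir_exponent, 3);
    MLX5_SET(flow_meter_parameters, params, cir_mantissa, 100);
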
--- /dev/null
+From 53f8504dc5520d80897590c13b975df78a3ad3a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 10:03:20 +0300
+Subject: net/mlx5: Add missing masks and QoS bit masks for scheduling elements
+
+From: Carolina Jubran <cjubran@nvidia.com>
+
+[ Upstream commit 452ef7f86036392005940de54228d42ca0044192 ]
+
+Add the missing masks for supported element types and Transmit
+Scheduling Arbiter (TSAR) types in scheduling elements.
+
+Also, add the corresponding bit masks for these types in the QoS
+capabilities of a NIC scheduler.
+
+Fixes: 214baf22870c ("net/mlx5e: Support HTB offload")
+Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mlx5/mlx5_ifc.h | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index e42d6d2d8ecb..d974c235ad8e 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -891,7 +891,8 @@ struct mlx5_ifc_qos_cap_bits {
+
+ u8 max_tsar_bw_share[0x20];
+
+- u8 reserved_at_100[0x20];
++ u8 nic_element_type[0x10];
++ u8 nic_tsar_type[0x10];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+@@ -3521,6 +3522,7 @@ enum {
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
++ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ };
+
+ struct mlx5_ifc_scheduling_context_bits {
+@@ -4187,6 +4189,12 @@ enum {
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ };
+
++enum {
++ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
++ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
++ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
++};
++
+ struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+--
+2.43.0
+
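A hedged sketch of how a driver might consult the new capability masks
before creating scheduling elements ("dev" is assumed to be a
struct mlx5_core_dev pointer):

    bool qgroup_ok = MLX5_CAP_QOS(dev, nic_element_type) &
                     ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
    bool ets_ok = MLX5_CAP_QOS(dev, nic_tsar_type) &
                  TSAR_TYPE_CAP_MASK_ETS;
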
--- /dev/null
+From 230958289c60eda532d044a892f7626f654bb2ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jul 2021 17:48:26 +0300
+Subject: net/mlx5: Add support to create match definer
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit e7e2519e3632396a25031b7e828ed35332e5dd07 ]
+
+Introduce new APIs to create and destroy a flow matcher
+for a given format id.
+
+A flow match definer object defines the fields and mask used
+for the hash calculation. The user should mask the desired
+fields, as is done in the match criteria.
+
+This object is assigned to a flow group of type hash. In this flow
+group type, packet lookup is done based on the hash result.
+
+This patch also adds the bits required to create such a flow group.
+
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Stable-dep-of: 452ef7f86036 ("net/mlx5: Add missing masks and QoS bit masks for scheduling elements")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 57 ++++
+ .../net/ethernet/mellanox/mlx5/core/fs_cmd.h | 4 +
+ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 46 +++
+ .../net/ethernet/mellanox/mlx5/core/fs_core.h | 5 +
+ .../mellanox/mlx5/core/steering/fs_dr.c | 15 +
+ include/linux/mlx5/fs.h | 8 +
+ include/linux/mlx5/mlx5_ifc.h | 272 ++++++++++++++++--
+ 7 files changed, 380 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+index 7db8df64a60e..57e1fa2fe5f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+@@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
+ return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
+ }
+
++static int
++mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
++ int definer_id)
++{
++ return 0;
++}
++
++static int
++mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
++ u16 format_id, u32 *match_mask)
++{
++ return 0;
++}
++
+ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
+@@ -909,6 +923,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
+ }
+
++static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
++ int definer_id)
++{
++ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
++ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
++
++ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
++ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
++ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
++ MLX5_OBJ_TYPE_MATCH_DEFINER);
++ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
++
++ return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
++}
++
++static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
++ u16 format_id, u32 *match_mask)
++{
++ u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
++ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
++ struct mlx5_core_dev *dev = ns->dev;
++ void *ptr;
++ int err;
++
++ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
++ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
++ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
++ MLX5_OBJ_TYPE_MATCH_DEFINER);
++
++ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
++ MLX5_SET(match_definer, ptr, format_id, format_id);
++
++ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
++ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
++
++ err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
++ return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
++}
++
+ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
+ .create_flow_table = mlx5_cmd_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_destroy_flow_table,
+@@ -923,6 +976,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
+ .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
++ .create_match_definer = mlx5_cmd_create_match_definer,
++ .destroy_match_definer = mlx5_cmd_destroy_match_definer,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
+@@ -942,6 +997,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
+ .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
++ .create_match_definer = mlx5_cmd_stub_create_match_definer,
++ .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+index 5ecd33cdc087..220ec632d35a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+@@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
+
+ int (*create_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
++ int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
++ u16 format_id, u32 *match_mask);
++ int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
++ int definer_id);
+ };
+
+ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index a55cacb988ac..fbfa5637714d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -3319,6 +3319,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ }
+ EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+
++int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
++{
++ return definer->id;
++}
++
++struct mlx5_flow_definer *
++mlx5_create_match_definer(struct mlx5_core_dev *dev,
++ enum mlx5_flow_namespace_type ns_type, u16 format_id,
++ u32 *match_mask)
++{
++ struct mlx5_flow_root_namespace *root;
++ struct mlx5_flow_definer *definer;
++ int id;
++
++ root = get_root_namespace(dev, ns_type);
++ if (!root)
++ return ERR_PTR(-EOPNOTSUPP);
++
++ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
++ if (!definer)
++ return ERR_PTR(-ENOMEM);
++
++ definer->ns_type = ns_type;
++ id = root->cmds->create_match_definer(root, format_id, match_mask);
++ if (id < 0) {
++ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
++ kfree(definer);
++ return ERR_PTR(id);
++ }
++ definer->id = id;
++ return definer;
++}
++
++void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
++ struct mlx5_flow_definer *definer)
++{
++ struct mlx5_flow_root_namespace *root;
++
++ root = get_root_namespace(dev, definer->ns_type);
++ if (WARN_ON(!root))
++ return;
++
++ root->cmds->destroy_match_definer(root, definer->id);
++ kfree(definer);
++}
++
+ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+index 98240badc342..67cf3cbb8618 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+@@ -49,6 +49,11 @@
+ #define FDB_TC_MAX_PRIO 16
+ #define FDB_TC_LEVELS_PER_PRIO 2
+
++struct mlx5_flow_definer {
++ enum mlx5_flow_namespace_type ns_type;
++ u32 id;
++};
++
+ struct mlx5_modify_hdr {
+ enum mlx5_flow_namespace_type ns_type;
+ union {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+index 0553ee1fe80a..5d22a28294d5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+@@ -626,6 +626,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
+ mlx5dr_action_destroy(modify_hdr->action.dr_action);
+ }
+
++static int
++mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
++ int definer_id)
++{
++ return -EOPNOTSUPP;
++}
++
++static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
++ u16 format_id, u32 *match_mask)
++{
++ return -EOPNOTSUPP;
++}
++
+ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+@@ -728,6 +741,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
+ .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
++ .create_match_definer = mlx5_cmd_dr_create_match_definer,
++ .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
+ .set_peer = mlx5_cmd_dr_set_peer,
+ .create_ns = mlx5_cmd_dr_create_ns,
+ .destroy_ns = mlx5_cmd_dr_destroy_ns,
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index 0106c67e8ccb..0e43f0fb6d73 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -97,6 +97,7 @@ enum {
+
+ struct mlx5_pkt_reformat;
+ struct mlx5_modify_hdr;
++struct mlx5_flow_definer;
+ struct mlx5_flow_table;
+ struct mlx5_flow_group;
+ struct mlx5_flow_namespace;
+@@ -257,6 +258,13 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ void *modify_actions);
+ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr);
++struct mlx5_flow_definer *
++mlx5_create_match_definer(struct mlx5_core_dev *dev,
++ enum mlx5_flow_namespace_type ns_type, u16 format_id,
++ u32 *match_mask);
++void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
++ struct mlx5_flow_definer *definer);
++int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+ struct mlx5_pkt_reformat_params {
+ int type;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index b89992e8a3c8..5151573da9b2 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -94,6 +94,7 @@ enum {
+ enum {
+ MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
++ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_MKEY = 0xff01,
+ MLX5_OBJ_TYPE_QP = 0xff02,
+ MLX5_OBJ_TYPE_PSV = 0xff03,
+@@ -1719,7 +1720,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
+
+- u8 reserved_at_6e0[0x10];
++ u8 max_num_match_definer[0x10];
+ u8 sf_base_id[0x10];
+
+ u8 flex_parser_id_gtpu_dw_2[0x4];
+@@ -1734,7 +1735,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+
+ u8 reserved_at_760[0x20];
+ u8 vhca_tunnel_commands[0x40];
+- u8 reserved_at_7c0[0x40];
++ u8 match_definer_format_supported[0x40];
+ };
+
+ struct mlx5_ifc_cmd_hca_cap_2_bits {
+@@ -5618,6 +5619,236 @@ struct mlx5_ifc_query_fte_in_bits {
+ u8 reserved_at_120[0xe0];
+ };
+
++struct mlx5_ifc_match_definer_format_0_bits {
++ u8 reserved_at_0[0x100];
++
++ u8 metadata_reg_c_0[0x20];
++
++ u8 metadata_reg_c_1[0x20];
++
++ u8 outer_dmac_47_16[0x20];
++
++ u8 outer_dmac_15_0[0x10];
++ u8 outer_ethertype[0x10];
++
++ u8 reserved_at_180[0x1];
++ u8 sx_sniffer[0x1];
++ u8 functional_lb[0x1];
++ u8 outer_ip_frag[0x1];
++ u8 outer_qp_type[0x2];
++ u8 outer_encap_type[0x2];
++ u8 port_number[0x2];
++ u8 outer_l3_type[0x2];
++ u8 outer_l4_type[0x2];
++ u8 outer_first_vlan_type[0x2];
++ u8 outer_first_vlan_prio[0x3];
++ u8 outer_first_vlan_cfi[0x1];
++ u8 outer_first_vlan_vid[0xc];
++
++ u8 outer_l4_type_ext[0x4];
++ u8 reserved_at_1a4[0x2];
++ u8 outer_ipsec_layer[0x2];
++ u8 outer_l2_type[0x2];
++ u8 force_lb[0x1];
++ u8 outer_l2_ok[0x1];
++ u8 outer_l3_ok[0x1];
++ u8 outer_l4_ok[0x1];
++ u8 outer_second_vlan_type[0x2];
++ u8 outer_second_vlan_prio[0x3];
++ u8 outer_second_vlan_cfi[0x1];
++ u8 outer_second_vlan_vid[0xc];
++
++ u8 outer_smac_47_16[0x20];
++
++ u8 outer_smac_15_0[0x10];
++ u8 inner_ipv4_checksum_ok[0x1];
++ u8 inner_l4_checksum_ok[0x1];
++ u8 outer_ipv4_checksum_ok[0x1];
++ u8 outer_l4_checksum_ok[0x1];
++ u8 inner_l3_ok[0x1];
++ u8 inner_l4_ok[0x1];
++ u8 outer_l3_ok_duplicate[0x1];
++ u8 outer_l4_ok_duplicate[0x1];
++ u8 outer_tcp_cwr[0x1];
++ u8 outer_tcp_ece[0x1];
++ u8 outer_tcp_urg[0x1];
++ u8 outer_tcp_ack[0x1];
++ u8 outer_tcp_psh[0x1];
++ u8 outer_tcp_rst[0x1];
++ u8 outer_tcp_syn[0x1];
++ u8 outer_tcp_fin[0x1];
++};
++
++struct mlx5_ifc_match_definer_format_22_bits {
++ u8 reserved_at_0[0x100];
++
++ u8 outer_ip_src_addr[0x20];
++
++ u8 outer_ip_dest_addr[0x20];
++
++ u8 outer_l4_sport[0x10];
++ u8 outer_l4_dport[0x10];
++
++ u8 reserved_at_160[0x1];
++ u8 sx_sniffer[0x1];
++ u8 functional_lb[0x1];
++ u8 outer_ip_frag[0x1];
++ u8 outer_qp_type[0x2];
++ u8 outer_encap_type[0x2];
++ u8 port_number[0x2];
++ u8 outer_l3_type[0x2];
++ u8 outer_l4_type[0x2];
++ u8 outer_first_vlan_type[0x2];
++ u8 outer_first_vlan_prio[0x3];
++ u8 outer_first_vlan_cfi[0x1];
++ u8 outer_first_vlan_vid[0xc];
++
++ u8 metadata_reg_c_0[0x20];
++
++ u8 outer_dmac_47_16[0x20];
++
++ u8 outer_smac_47_16[0x20];
++
++ u8 outer_smac_15_0[0x10];
++ u8 outer_dmac_15_0[0x10];
++};
++
++struct mlx5_ifc_match_definer_format_23_bits {
++ u8 reserved_at_0[0x100];
++
++ u8 inner_ip_src_addr[0x20];
++
++ u8 inner_ip_dest_addr[0x20];
++
++ u8 inner_l4_sport[0x10];
++ u8 inner_l4_dport[0x10];
++
++ u8 reserved_at_160[0x1];
++ u8 sx_sniffer[0x1];
++ u8 functional_lb[0x1];
++ u8 inner_ip_frag[0x1];
++ u8 inner_qp_type[0x2];
++ u8 inner_encap_type[0x2];
++ u8 port_number[0x2];
++ u8 inner_l3_type[0x2];
++ u8 inner_l4_type[0x2];
++ u8 inner_first_vlan_type[0x2];
++ u8 inner_first_vlan_prio[0x3];
++ u8 inner_first_vlan_cfi[0x1];
++ u8 inner_first_vlan_vid[0xc];
++
++ u8 tunnel_header_0[0x20];
++
++ u8 inner_dmac_47_16[0x20];
++
++ u8 inner_smac_47_16[0x20];
++
++ u8 inner_smac_15_0[0x10];
++ u8 inner_dmac_15_0[0x10];
++};
++
++struct mlx5_ifc_match_definer_format_29_bits {
++ u8 reserved_at_0[0xc0];
++
++ u8 outer_ip_dest_addr[0x80];
++
++ u8 outer_ip_src_addr[0x80];
++
++ u8 outer_l4_sport[0x10];
++ u8 outer_l4_dport[0x10];
++
++ u8 reserved_at_1e0[0x20];
++};
++
++struct mlx5_ifc_match_definer_format_30_bits {
++ u8 reserved_at_0[0xa0];
++
++ u8 outer_ip_dest_addr[0x80];
++
++ u8 outer_ip_src_addr[0x80];
++
++ u8 outer_dmac_47_16[0x20];
++
++ u8 outer_smac_47_16[0x20];
++
++ u8 outer_smac_15_0[0x10];
++ u8 outer_dmac_15_0[0x10];
++};
++
++struct mlx5_ifc_match_definer_format_31_bits {
++ u8 reserved_at_0[0xc0];
++
++ u8 inner_ip_dest_addr[0x80];
++
++ u8 inner_ip_src_addr[0x80];
++
++ u8 inner_l4_sport[0x10];
++ u8 inner_l4_dport[0x10];
++
++ u8 reserved_at_1e0[0x20];
++};
++
++struct mlx5_ifc_match_definer_format_32_bits {
++ u8 reserved_at_0[0xa0];
++
++ u8 inner_ip_dest_addr[0x80];
++
++ u8 inner_ip_src_addr[0x80];
++
++ u8 inner_dmac_47_16[0x20];
++
++ u8 inner_smac_47_16[0x20];
++
++ u8 inner_smac_15_0[0x10];
++ u8 inner_dmac_15_0[0x10];
++};
++
++struct mlx5_ifc_match_definer_bits {
++ u8 modify_field_select[0x40];
++
++ u8 reserved_at_40[0x40];
++
++ u8 reserved_at_80[0x10];
++ u8 format_id[0x10];
++
++ u8 reserved_at_a0[0x160];
++
++ u8 match_mask[16][0x20];
++};
++
++struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
++ u8 opcode[0x10];
++ u8 uid[0x10];
++
++ u8 vhca_tunnel_id[0x10];
++ u8 obj_type[0x10];
++
++ u8 obj_id[0x20];
++
++ u8 reserved_at_60[0x20];
++};
++
++struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
++ u8 status[0x8];
++ u8 reserved_at_8[0x18];
++
++ u8 syndrome[0x20];
++
++ u8 obj_id[0x20];
++
++ u8 reserved_at_60[0x20];
++};
++
++struct mlx5_ifc_create_match_definer_in_bits {
++ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
++
++ struct mlx5_ifc_match_definer_bits obj_context;
++};
++
++struct mlx5_ifc_create_match_definer_out_bits {
++ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
++};
++
+ enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+@@ -8091,6 +8322,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
+ u8 reserved_at_60[0x20];
+ };
+
++enum {
++ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
++ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
++};
++
+ enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+@@ -8112,7 +8348,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
+ u8 reserved_at_60[0x20];
+
+ u8 table_type[0x8];
+- u8 reserved_at_88[0x18];
++ u8 reserved_at_88[0x4];
++ u8 group_type[0x4];
++ u8 reserved_at_90[0x10];
+
+ u8 reserved_at_a0[0x8];
+ u8 table_id[0x18];
+@@ -8127,7 +8365,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
+
+ u8 end_flow_index[0x20];
+
+- u8 reserved_at_140[0xa0];
++ u8 reserved_at_140[0x10];
++ u8 match_definer_id[0x10];
++
++ u8 reserved_at_160[0x80];
+
+ u8 reserved_at_1e0[0x18];
+ u8 match_criteria_enable[0x8];
+@@ -10617,29 +10858,6 @@ struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 reserved_at_40[0x40];
+ };
+
+-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+- u8 opcode[0x10];
+- u8 uid[0x10];
+-
+- u8 vhca_tunnel_id[0x10];
+- u8 obj_type[0x10];
+-
+- u8 obj_id[0x20];
+-
+- u8 reserved_at_60[0x20];
+-};
+-
+-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+- u8 status[0x8];
+- u8 reserved_at_8[0x18];
+-
+- u8 syndrome[0x20];
+-
+- u8 obj_id[0x20];
+-
+- u8 reserved_at_60[0x20];
+-};
+-
+ struct mlx5_ifc_umem_bits {
+ u8 reserved_at_0[0x80];
+
+--
+2.43.0
+
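A hedged usage sketch of the new API: mask part of the outer 5-tuple via
format 22 and create the definer. "dev" and the chosen fields are
assumptions for illustration; error handling follows the ERR_PTR
convention of the core code above.

    u32 match_mask[MLX5_ST_SZ_DW(match_definer_format_22)] = {};
    struct mlx5_flow_definer *definer;

    MLX5_SET(match_definer_format_22, match_mask, outer_l4_sport, 0xffff);
    MLX5_SET(match_definer_format_22, match_mask, outer_l4_dport, 0xffff);

    definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_KERNEL,
                                        22, match_mask);
    if (IS_ERR(definer))
            return PTR_ERR(definer);

    /* mlx5_get_match_definer_id(definer) would then be set as the
     * match_definer_id of a hash-split flow group
     */
    mlx5_destroy_match_definer(dev, definer);
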
--- /dev/null
+From 0a441e073a1a7b6b97a4d5c3a2b1ea5d04a7ccc0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Sep 2024 11:46:14 +0300
+Subject: net/mlx5: Explicitly set scheduling element and TSAR type
+
+From: Carolina Jubran <cjubran@nvidia.com>
+
+[ Upstream commit c88146abe4d0f8cf659b2b8883fdc33936d2e3b8 ]
+
+Ensure the scheduling element type and TSAR type are explicitly
+initialized in the QoS rate group creation.
+
+This prevents potential issues due to default values.
+
+Fixes: 1ae258f8b343 ("net/mlx5: E-switch, Introduce rate limiting groups API")
+Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 65c8f1f08472..b7758a1c015e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -424,6 +424,7 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
+ {
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_esw_rate_group *group;
++ __be32 *attr;
+ u32 divider;
+ int err;
+
+@@ -434,6 +435,12 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
++ MLX5_SET(scheduling_context, tsar_ctx, element_type,
++ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
++
++ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
++ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
++
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ esw->qos.root_tsar_ix);
+ err = mlx5_create_scheduling_element_cmd(esw->dev,
+--
+2.43.0
+
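Why the "<< 16" in the hunk above: in the big-endian scheduling context,
the tsar_element layout puts tsar_type in the second byte (bits
0x8-0xf), so placing the value at host bits 23:16 and converting with
cpu_to_be32() lands it there. An equivalent, arguably clearer form using
the IFC accessors (a sketch, with "attr" as a void pointer into
element_attributes):

    MLX5_SET(scheduling_context, tsar_ctx, element_type,
             SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
    attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
    MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
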
--- /dev/null
+From fdadd94274e73dcc0ba6f5d9963b2da4bf0dd211 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Aug 2024 13:56:13 +0300
+Subject: net/mlx5e: Add missing link modes to ptys2ethtool_map
+
+From: Shahar Shitrit <shshitrit@nvidia.com>
+
+[ Upstream commit 7617d62cba4a8a3ff3ed3fda0171c43f135c142e ]
+
+Add MLX5E_1000BASE_T and MLX5E_100BASE_TX to the legacy
+modes in ptys2legacy_ethtool_table, since they were missing.
+
+Fixes: 665bc53969d7 ("net/mlx5e: Use new ethtool get/set link ksettings API")
+Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 06f6809b1c2b..a9080e3ecd84 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -129,6 +129,10 @@ void mlx5e_build_ptys2ethtool_map(void)
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
++ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
++ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
+--
+2.43.0
+
--- /dev/null
+From 213aa0be69b3c8d774fbb28418afc7dade0fa0bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Sep 2024 12:54:46 +0200
+Subject: netfilter: nft_socket: fix sk refcount leaks
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 8b26ff7af8c32cb4148b3e147c52f9e4c695209c ]
+
+We must put the 'sk' reference before returning; the early NFT_BREAK
+returns leaked it.
+
+Fixes: 039b1f4f24ec ("netfilter: nft_socket: fix erroneous socket assignment")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_socket.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 3c444fcb20ec..3cbfb6ba32c7 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -108,13 +108,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ *dest = sk->sk_mark;
+ } else {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ case NFT_SOCKET_WILDCARD:
+ if (!sk_fullsock(sk)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ nft_socket_wildcard(pkt, regs, sk, dest);
+ break;
+@@ -122,7 +122,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ case NFT_SOCKET_CGROUPV2:
+ if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ #endif
+@@ -131,6 +131,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ regs->verdict.code = NFT_BREAK;
+ }
+
++out_put_sk:
+ if (sk != skb->sk)
+ sock_gen_put(sk);
+ }
+--
+2.43.0
+
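The general pattern the fix applies: take the reference once, then route
every error exit through a single label that drops it, instead of
returning directly. Distilled from the hunks above:

    /* before: each NFT_BREAK path did "return;" and leaked the ref */
    if (some_error_condition) {
            regs->verdict.code = NFT_BREAK;
            goto out_put_sk;              /* fall through to the one put */
    }
    /* ... */
    out_put_sk:
    if (sk != skb->sk)                    /* only put refs the lookup took */
            sock_gen_put(sk);
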
--- /dev/null
+From bb980be6dda7821b2bce8890db0950ce66254f4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Sep 2024 10:28:38 +0530
+Subject: octeontx2-af: Modify SMQ flush sequence to drop packets
+
+From: Naveen Mamindlapalli <naveenm@marvell.com>
+
+[ Upstream commit 019aba04f08c2102b35ce7fee9d4628d349f56c0 ]
+
+The current implementation of the SMQ flush sequence waits for the
+packets in the TM pipeline to be transmitted out of the link. This
+sequence doesn't succeed in HW when there is any issue with the link,
+such as a lack of link credits, link down, or other traffic fully
+occupying the link bandwidth (QoS). This patch modifies the SMQ flush
+sequence to drop the packets after the TL1 level (SQM) instead of
+polling for the packets to be sent out of the RPM/CGX link.
+
+Fixes: 5d9b976d4480 ("octeontx2-af: Support fixed transmit scheduler topology")
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Reviewed-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Link: https://patch.msgid.link/20240906045838.1620308-1-naveenm@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/rvu.h | 3 +-
+ .../ethernet/marvell/octeontx2/af/rvu_nix.c | 59 +++++++++++++++----
+ 2 files changed, 48 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index db02fae7b831..66bb2222350c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -279,6 +279,7 @@ struct nix_mark_format {
+
+ /* smq(flush) to tl1 cir/pir info */
+ struct nix_smq_tree_ctx {
++ u16 schq;
+ u64 cir_off;
+ u64 cir_val;
+ u64 pir_off;
+@@ -288,8 +289,6 @@ struct nix_smq_tree_ctx {
+ /* smq flush context */
+ struct nix_smq_flush_ctx {
+ int smq;
+- u16 tl1_schq;
+- u16 tl2_schq;
+ struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index e6b368ec4a3b..f2e1c63035e8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2094,14 +2094,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+ schq = smq;
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++ smq_tree_ctx->schq = schq;
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+- smq_flush_ctx->tl1_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+ smq_tree_ctx->pir_off = 0;
+ smq_tree_ctx->pir_val = 0;
+ parent_off = 0;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+- smq_flush_ctx->tl2_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+ parent_off = NIX_AF_TL2X_PARENT(schq);
+@@ -2136,8 +2135,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+ {
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
++ int tl2, tl2_schq;
+ u64 regoff;
+- int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+@@ -2145,16 +2144,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+
+ /* loop through all TL2s with matching PF_FUNC */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
++ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
+ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+ /* skip the smq(flush) TL2 */
+- if (tl2 == smq_flush_ctx->tl2_schq)
++ if (tl2 == tl2_schq)
+ continue;
+ /* skip unused TL2s */
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+ continue;
+ /* skip if PF_FUNC doesn't match */
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
++ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ /* enable/disable XOFF */
+@@ -2196,10 +2196,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+ {
+ struct nix_smq_flush_ctx *smq_flush_ctx;
++ int err, restore_tx_en = 0, i;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+- int err, restore_tx_en = 0;
+- u64 cfg;
++ u16 tl2_tl3_link_schq;
++ u8 link, link_level;
++ u64 cfg, bmap = 0;
+
+ /* enable cgx tx if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+@@ -2216,16 +2218,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+- /* Do SMQ flush and set enqueue xoff */
+- cfg |= BIT_ULL(50) | BIT_ULL(49);
+- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+-
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
++ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
++ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
++ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
++ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
++
++ /* SMQ set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
++	/* Clear all NIX_AF_TL3_TL2X_LINKX_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ if (!(cfg & BIT_ULL(12)))
++ continue;
++ bmap |= (1 << i);
++ cfg &= ~BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
++ /* Do SMQ flush and set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50) | BIT_ULL(49);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+@@ -2234,6 +2258,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+ nixlf, smq);
+
++	/* Set NIX_AF_TL3_TL2X_LINKX_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ if (!(bmap & (1 << i)))
++ continue;
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ cfg |= BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
+ /* clear XOFF on TL2s */
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+--
+2.43.0
+
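+The hunks above use a save/disable/restore bitmap pattern: record which
+links currently have NIX_AF_TL3_TL2X_LINKX_CFG[ENA] (bit 12) set, clear
+the bit so in-flight packets are dropped during the flush, then
+re-enable exactly the links that were recorded. A minimal self-contained
+sketch of that pattern follows; reg[], NUM_LINKS and ENA_BIT are
+illustrative stand-ins, not the driver's symbols:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define NUM_LINKS 4	/* stand-in for cgx_links + lbk_links */
+	#define ENA_BIT   12	/* stand-in for the LINKX_CFG ENA bit */
+
+	/* stand-in for the per-link LINKX_CFG register contents */
+	static uint64_t reg[NUM_LINKS] = { 1ULL << ENA_BIT, 0, 1ULL << ENA_BIT, 0 };
+
+	int main(void)
+	{
+		uint64_t bmap = 0;
+		int i;
+
+		/* disable: clear ENA, remembering which links had it set */
+		for (i = 0; i < NUM_LINKS; i++) {
+			if (!(reg[i] & (1ULL << ENA_BIT)))
+				continue;
+			bmap |= 1ULL << i;
+			reg[i] &= ~(1ULL << ENA_BIT);
+		}
+
+		/* ... the SMQ flush would run here, with links disabled ... */
+
+		/* restore: re-enable exactly the links recorded in bmap */
+		for (i = 0; i < NUM_LINKS; i++)
+			if (bmap & (1ULL << i))
+				reg[i] |= 1ULL << ENA_BIT;
+
+		for (i = 0; i < NUM_LINKS; i++)
+			printf("link %d ENA=%d\n", i, !!(reg[i] & (1ULL << ENA_BIT)));
+		return 0;
+	}
+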
--- /dev/null
+From 8e22ddbeda70124e33b27af089e592b8858dace4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 11:34:24 +0530
+Subject: octeontx2-af: Set XOFF on other child transmit schedulers during SMQ
+ flush
+
+From: Naveen Mamindlapalli <naveenm@marvell.com>
+
+[ Upstream commit e18aab0470d8f6259be82282ffb3fdcfeaeff6c3 ]
+
+When multiple transmit scheduler queues feed a TL1 transmit link, the
+SMQ flush initiated on a low-priority queue might get stuck when a
+high-priority queue fully subscribes the transmit link. This in turn
+affects interface teardown. To avoid this, temporarily XOFF all of the
+TL1's other immediate child transmit scheduler queues and also clear
+any rate-limit configuration on all the scheduler queues in the
+SMQ(flush) hierarchy.
+
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 019aba04f08c ("octeontx2-af: Modify SMQ flush sequence to drop packets")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/rvu.h | 16 +++
+ .../ethernet/marvell/octeontx2/af/rvu_nix.c | 130 +++++++++++++++++-
+ 2 files changed, 144 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index b4be1b597f33..db02fae7b831 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -277,6 +277,22 @@ struct nix_mark_format {
+ u32 *cfg;
+ };
+
++/* smq(flush) to tl1 cir/pir info */
++struct nix_smq_tree_ctx {
++ u64 cir_off;
++ u64 cir_val;
++ u64 pir_off;
++ u64 pir_val;
++};
++
++/* smq flush context */
++struct nix_smq_flush_ctx {
++ int smq;
++ u16 tl1_schq;
++ u16 tl2_schq;
++ struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
++};
++
+ struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 641f1d969bb7..e6b368ec4a3b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2081,9 +2081,121 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ return rc;
+ }
+
++static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
++ struct nix_smq_flush_ctx *smq_flush_ctx)
++{
++ struct nix_smq_tree_ctx *smq_tree_ctx;
++ u64 parent_off, regval;
++ u16 schq;
++ int lvl;
++
++ smq_flush_ctx->smq = smq;
++
++ schq = smq;
++ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
++ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++ if (lvl == NIX_TXSCH_LVL_TL1) {
++ smq_flush_ctx->tl1_schq = schq;
++ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
++ smq_tree_ctx->pir_off = 0;
++ smq_tree_ctx->pir_val = 0;
++ parent_off = 0;
++ } else if (lvl == NIX_TXSCH_LVL_TL2) {
++ smq_flush_ctx->tl2_schq = schq;
++ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
++ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
++ parent_off = NIX_AF_TL2X_PARENT(schq);
++ } else if (lvl == NIX_TXSCH_LVL_TL3) {
++ smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
++ smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
++ parent_off = NIX_AF_TL3X_PARENT(schq);
++ } else if (lvl == NIX_TXSCH_LVL_TL4) {
++ smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
++ smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
++ parent_off = NIX_AF_TL4X_PARENT(schq);
++ } else if (lvl == NIX_TXSCH_LVL_MDQ) {
++ smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
++ smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
++ parent_off = NIX_AF_MDQX_PARENT(schq);
++ }
++ /* save cir/pir register values */
++ smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
++ if (smq_tree_ctx->pir_off)
++ smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
++
++ /* get parent txsch node */
++ if (parent_off) {
++ regval = rvu_read64(rvu, blkaddr, parent_off);
++ schq = (regval >> 16) & 0x1FF;
++ }
++ }
++}
++
++static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
++ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
++{
++ struct nix_txsch *txsch;
++ struct nix_hw *nix_hw;
++ u64 regoff;
++ int tl2;
++
++ nix_hw = get_nix_hw(rvu->hw, blkaddr);
++ if (!nix_hw)
++ return;
++
++ /* loop through all TL2s with matching PF_FUNC */
++ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
++ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
++ /* skip the smq(flush) TL2 */
++ if (tl2 == smq_flush_ctx->tl2_schq)
++ continue;
++ /* skip unused TL2s */
++ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
++ continue;
++ /* skip if PF_FUNC doesn't match */
++ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
++ (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
++ ~RVU_PFVF_FUNC_MASK)))
++ continue;
++ /* enable/disable XOFF */
++ regoff = NIX_AF_TL2X_SW_XOFF(tl2);
++ if (enable)
++ rvu_write64(rvu, blkaddr, regoff, 0x1);
++ else
++ rvu_write64(rvu, blkaddr, regoff, 0x0);
++ }
++}
++
++static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
++ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
++{
++ u64 cir_off, pir_off, cir_val, pir_val;
++ struct nix_smq_tree_ctx *smq_tree_ctx;
++ int lvl;
++
++ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
++ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++ cir_off = smq_tree_ctx->cir_off;
++ cir_val = smq_tree_ctx->cir_val;
++ pir_off = smq_tree_ctx->pir_off;
++ pir_val = smq_tree_ctx->pir_val;
++
++ if (enable) {
++ rvu_write64(rvu, blkaddr, cir_off, cir_val);
++ if (lvl != NIX_TXSCH_LVL_TL1)
++ rvu_write64(rvu, blkaddr, pir_off, pir_val);
++ } else {
++ rvu_write64(rvu, blkaddr, cir_off, 0x0);
++ if (lvl != NIX_TXSCH_LVL_TL1)
++ rvu_write64(rvu, blkaddr, pir_off, 0x0);
++ }
++ }
++}
++
+ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+ {
++ struct nix_smq_flush_ctx *smq_flush_ctx;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+@@ -2096,6 +2208,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ lmac_id, true);
+ }
+
++ /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
++ smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
++ if (!smq_flush_ctx)
++ return -ENOMEM;
++ nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
++ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
++ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
++
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+@@ -2110,8 +2230,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+- dev_err(rvu->dev,
+- "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
++ dev_info(rvu->dev,
++ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
++ nixlf, smq);
++
++ /* clear XOFF on TL2s */
++ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
++ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
++ kfree(smq_flush_ctx);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+--
+2.43.0
+
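+The nix_smq_flush_fill_ctx() helper added above walks the scheduler
+hierarchy from the SMQ up to TL1 by reading each level's PARENT
+register; the parent schq lives in bits <16:24>, hence the
+'(regval >> 16) & 0x1FF'. A minimal self-contained sketch of that walk,
+with parent_reg[][] as a hypothetical stand-in for the per-level
+NIX_AF_*_PARENT registers:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define NUM_LVLS 5	/* SMQ/MDQ, TL4, TL3, TL2, TL1 */
+
+	/* hypothetical PARENT registers, parent schq in bits <16:24> */
+	static uint64_t parent_reg[NUM_LVLS][16];
+
+	int main(void)
+	{
+		uint16_t schq = 3;	/* start at the SMQ */
+		int lvl;
+
+		/* wire up a fake hierarchy: 3 -> 7 -> 2 -> 9 -> 5 */
+		parent_reg[0][3] = (uint64_t)7 << 16;
+		parent_reg[1][7] = (uint64_t)2 << 16;
+		parent_reg[2][2] = (uint64_t)9 << 16;
+		parent_reg[3][9] = (uint64_t)5 << 16;
+
+		for (lvl = 0; lvl < NUM_LVLS; lvl++) {
+			printf("lvl %d: schq %u\n", lvl, schq);
+			if (lvl < NUM_LVLS - 1)	/* TL1 has no PARENT */
+				schq = (parent_reg[lvl][schq] >> 16) & 0x1FF;
+		}
+		return 0;
+	}
+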
arm64-dts-rockchip-fix-pmic-interrupt-pin-in-pinctrl.patch
eeprom-digsy_mtc-fix-93xx46-driver-probe-failure.patch
selftests-bpf-support-sock_stream-in-unix_inet_redir.patch
+hwmon-pmbus-introduce-and-use-write_byte_data-callba.patch
+hwmon-pmbus-conditionally-clear-individual-status-bi.patch
+ice-fix-accounting-for-filters-shared-by-multiple-vs.patch
+igb-always-call-igb_xdp_ring_update_tail-under-tx-lo.patch
+net-mlx5e-add-missing-link-modes-to-ptys2ethtool_map.patch
+net-mlx5-explicitly-set-scheduling-element-and-tsar-.patch
+net-mlx5-add-support-to-create-match-definer.patch
+net-mlx5-add-ifc-bits-and-enums-for-flow-meter.patch
+net-mlx5-add-missing-masks-and-qos-bit-masks-for-sch.patch
+fou-fix-initialization-of-grc.patch
+octeontx2-af-set-xoff-on-other-child-transmit-schedu.patch
+octeontx2-af-modify-smq-flush-sequence-to-drop-packe.patch
+net-ftgmac100-enable-tx-interrupt-to-avoid-tx-timeou.patch
+netfilter-nft_socket-fix-sk-refcount-leaks.patch
+net-dpaa-pad-packets-to-eth_zlen.patch