--- /dev/null
+From 48dc6d2fe28865a5c3d271aeb966b984a8085e7c Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:35 +0800
+Subject: [PATCH] dt-bindings: net: Add PPE for Qualcomm IPQ9574 SoC
+
+The PPE (packet process engine) hardware block is available in Qualcomm
+IPQ chipsets that support the PPE architecture, such as the IPQ9574.
+The PPE in the IPQ9574 SoC includes six Ethernet ports (6 GMACs and
+6 XGMACs), which connect to external PHY devices through the PCS. It
+includes an L2 switch function for bridging packets among the six
+Ethernet ports and the CPU port. The CPU port enables packet transfer
+between the Ethernet ports and the ARM cores in the SoC, using the
+Ethernet DMA.
+
+The PPE also includes packet processing offload capabilities for various
+networking functions such as routed and bridged flows, VLANs, various
+tunnel protocols and VPN.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../bindings/net/qcom,ipq9574-ppe.yaml | 406 ++++++++++++++++++
+ 1 file changed, 406 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/net/qcom,ipq9574-ppe.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/net/qcom,ipq9574-ppe.yaml
+@@ -0,0 +1,406 @@
++# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/net/qcom,ipq9574-ppe.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm IPQ packet process engine (PPE)
++
++maintainers:
++ - Luo Jie <quic_luoj@quicinc.com>
++ - Lei Wei <quic_leiwei@quicinc.com>
++ - Suruchi Agarwal <quic_suruchia@quicinc.com>
++ - Pavithra R <quic_pavir@quicinc.com>
++
++description:
++ The Ethernet functionality in the PPE (Packet Process Engine) consists of
++ three components: the switch core, the port wrapper and the Ethernet DMA.
++
++ The switch core in the IPQ9574 PPE has a maximum of 6 front panel ports and
++ two FIFO interfaces. One of the two FIFO interfaces is used for Ethernet
++ port to host CPU communication using the Ethernet DMA. The other is used
++ for communicating with the EIP engine, which is used for IPsec offload. On
++ the IPQ9574, the PPE includes 6 GMACs/XGMACs that can be connected with
++ external Ethernet PHYs. The switch core also includes the BM (Buffer
++ Management), QM (Queue Management) and SCH (Scheduler) modules to support
++ packet processing.
++
++ The port wrapper provides connections from the 6 GMACs/XGMACs to UNIPHY (PCS)
++ supporting various modes such as SGMII/QSGMII/PSGMII/USXGMII/10G-BASER. There
++ are 3 UNIPHY (PCS) instances supported on the IPQ9574.
++
++ Ethernet DMA is used to transmit and receive packets between the six Ethernet
++ ports and the ARM host CPU.
++
++ The following diagram shows the PPE hardware block along with its
++ connectivity to external hardware blocks, such as the clock blocks (CMN
++ PLL, GCC, NSS clock controller) and the Ethernet PCS/PHY blocks. For
++ depicting the PHY connectivity, one 4x1 Gbps PHY (QCA8075) and two 10 Gbps
++ PHYs are used as an example.
++ - |
++ +---------+
++ | 48 MHZ |
++ +----+----+
++ |(clock)
++ v
++ +----+----+
++ +------| CMN PLL |
++ | +----+----+
++ | |(clock)
++ | v
++ | +----+----+ +----+----+ (clock) +----+----+
++ | +---| NSSCC | | GCC |--------->| MDIO |
++ | | +----+----+ +----+----+ +----+----+
++ | | |(clock & reset) |(clock)
++ | | v v
++ | | +-----------------------------+----------+----------+---------+
++ | | | +-----+ |EDMA FIFO | | EIP FIFO|
++ | | | | SCH | +----------+ +---------+
++ | | | +-----+ | | |
++ | | | +------+ +------+ +-------------------+ |
++ | | | | BM | | QM | IPQ9574-PPE | L2/L3 Process | |
++ | | | +------+ +------+ +-------------------+ |
++ | | | | |
++ | | | +-------+ +-------+ +-------+ +-------+ +-------+ +-------+ |
++ | | | | MAC0 | | MAC1 | | MAC2 | | MAC3 | | XGMAC4| |XGMAC5 | |
++ | | | +---+---+ +---+---+ +---+---+ +---+---+ +---+---+ +---+---+ |
++ | | | | | | | | | |
++ | | +-----+---------+---------+---------+---------+---------+-----+
++ | | | | | | | |
++ | | +---+---------+---------+---------+---+ +---+---+ +---+---+
++ +--+---->| PCS0 | | PCS1 | | PCS2 |
++ |(clock) +---+---------+---------+---------+---+ +---+---+ +---+---+
++ | | | | | | |
++ | +---+---------+---------+---------+---+ +---+---+ +---+---+
++ +------->| QCA8075 PHY | | PHY4 | | PHY5 |
++ (clock) +-------------------------------------+ +-------+ +-------+
++
++properties:
++ compatible:
++ enum:
++ - qcom,ipq9574-ppe
++
++ reg:
++ maxItems: 1
++
++ clocks:
++ items:
++ - description: PPE core clock from NSS clock controller
++ - description: PPE APB (Advanced Peripheral Bus) clock from NSS clock controller
++ - description: PPE ingress process engine clock from NSS clock controller
++ - description: PPE BM, QM and scheduler clock from NSS clock controller
++
++ clock-names:
++ items:
++ - const: ppe
++ - const: apb
++ - const: ipe
++ - const: btq
++
++ resets:
++ maxItems: 1
++ description: PPE reset, which is necessary before configuring PPE hardware
++
++ interconnects:
++ items:
++ - description: Clock path leading to PPE switch core function
++ - description: Clock path leading to PPE register access
++ - description: Clock path leading to QoS generation
++ - description: Clock path leading to timeout reference
++ - description: Clock path leading to NSS NOC from memory NOC
++ - description: Clock path leading to memory NOC from NSS NOC
++ - description: Clock path leading to enhanced memory NOC from NSS NOC
++
++ interconnect-names:
++ items:
++ - const: ppe
++ - const: ppe_cfg
++ - const: qos_gen
++ - const: timeout_ref
++ - const: nssnoc_memnoc
++ - const: memnoc_nssnoc
++ - const: memnoc_nssnoc_1
++
++ ethernet-dma:
++ type: object
++ additionalProperties: false
++ description:
++ EDMA (Ethernet DMA) is used to transmit packets between PPE and ARM
++ host CPU. There are 32 TX descriptor rings, 32 TX completion rings,
++ 24 RX descriptor rings and 8 RX fill rings supported.
++
++ properties:
++ clocks:
++ items:
++ - description: EDMA system clock from NSS Clock Controller
++ - description: EDMA APB (Advanced Peripheral Bus) clock from
++ NSS Clock Controller
++
++ clock-names:
++ items:
++ - const: sys
++ - const: apb
++
++ resets:
++ maxItems: 1
++ description: EDMA reset from NSS clock controller
++
++ interrupts:
++ minItems: 29
++ maxItems: 57
++
++ interrupt-names:
++ minItems: 29
++ maxItems: 57
++ items:
++ pattern: '^(txcmpl_([0-9]|[1-2][0-9]|3[0-1])|rxdesc_([0-9]|1[0-9]|2[0-3])|misc)$'
++ description:
++ Interrupts "txcmpl_[0-31]" are the Ethernet DMA Tx completion ring interrupts.
++ Interrupts "rxdesc_[0-23]" are the Ethernet DMA Rx Descriptor ring interrupts.
++ Interrupt "misc" is the Ethernet DMA miscellaneous error interrupt.
++
++ required:
++ - clocks
++ - clock-names
++ - resets
++ - interrupts
++ - interrupt-names
++
++required:
++ - compatible
++ - reg
++ - clocks
++ - clock-names
++ - resets
++ - interconnects
++ - interconnect-names
++ - ethernet-dma
++
++allOf:
++ - $ref: ethernet-switch.yaml
++
++unevaluatedProperties: false
++
++examples:
++ - |
++ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
++ #include <dt-bindings/interconnect/qcom,ipq9574.h>
++ #include <dt-bindings/interrupt-controller/arm-gic.h>
++
++ ethernet-switch@3a000000 {
++ compatible = "qcom,ipq9574-ppe";
++ reg = <0x3a000000 0xbef800>;
++ clocks = <&nsscc 80>,
++ <&nsscc 79>,
++ <&nsscc 81>,
++ <&nsscc 78>;
++ clock-names = "ppe",
++ "apb",
++ "ipe",
++ "btq";
++ resets = <&nsscc 108>;
++ interconnects = <&nsscc MASTER_NSSNOC_PPE &nsscc SLAVE_NSSNOC_PPE>,
++ <&nsscc MASTER_NSSNOC_PPE_CFG &nsscc SLAVE_NSSNOC_PPE_CFG>,
++ <&gcc MASTER_NSSNOC_QOSGEN_REF &gcc SLAVE_NSSNOC_QOSGEN_REF>,
++ <&gcc MASTER_NSSNOC_TIMEOUT_REF &gcc SLAVE_NSSNOC_TIMEOUT_REF>,
++ <&gcc MASTER_MEM_NOC_NSSNOC &gcc SLAVE_MEM_NOC_NSSNOC>,
++ <&gcc MASTER_NSSNOC_MEMNOC &gcc SLAVE_NSSNOC_MEMNOC>,
++ <&gcc MASTER_NSSNOC_MEM_NOC_1 &gcc SLAVE_NSSNOC_MEM_NOC_1>;
++ interconnect-names = "ppe",
++ "ppe_cfg",
++ "qos_gen",
++ "timeout_ref",
++ "nssnoc_memnoc",
++ "memnoc_nssnoc",
++ "memnoc_nssnoc_1";
++
++ ethernet-dma {
++ clocks = <&nsscc 77>,
++ <&nsscc 76>;
++ clock-names = "sys",
++ "apb";
++ resets = <&nsscc 0>;
++ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "txcmpl_8",
++ "txcmpl_9",
++ "txcmpl_10",
++ "txcmpl_11",
++ "txcmpl_12",
++ "txcmpl_13",
++ "txcmpl_14",
++ "txcmpl_15",
++ "txcmpl_16",
++ "txcmpl_17",
++ "txcmpl_18",
++ "txcmpl_19",
++ "txcmpl_20",
++ "txcmpl_21",
++ "txcmpl_22",
++ "txcmpl_23",
++ "txcmpl_24",
++ "txcmpl_25",
++ "txcmpl_26",
++ "txcmpl_27",
++ "txcmpl_28",
++ "txcmpl_29",
++ "txcmpl_30",
++ "txcmpl_31",
++ "rxdesc_20",
++ "rxdesc_21",
++ "rxdesc_22",
++ "rxdesc_23",
++ "misc";
++ };
++
++ ethernet-ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ port@1 {
++ reg = <1>;
++ phy-mode = "qsgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy0>;
++ pcs-handle = <&pcs0_mii0>;
++ clocks = <&nsscc 33>,
++ <&nsscc 34>,
++ <&nsscc 37>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 29>,
++ <&nsscc 96>,
++ <&nsscc 97>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++
++ port@2 {
++ reg = <2>;
++ phy-mode = "qsgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy1>;
++ pcs-handle = <&pcs0_mii1>;
++ clocks = <&nsscc 40>,
++ <&nsscc 41>,
++ <&nsscc 44>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 30>,
++ <&nsscc 98>,
++ <&nsscc 99>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++
++ port@3 {
++ reg = <3>;
++ phy-mode = "qsgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy2>;
++ pcs-handle = <&pcs0_mii2>;
++ clocks = <&nsscc 47>,
++ <&nsscc 48>,
++ <&nsscc 51>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 31>,
++ <&nsscc 100>,
++ <&nsscc 101>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++
++ port@4 {
++ reg = <4>;
++ phy-mode = "qsgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy3>;
++ pcs-handle = <&pcs0_mii3>;
++ clocks = <&nsscc 54>,
++ <&nsscc 55>,
++ <&nsscc 58>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 32>,
++ <&nsscc 102>,
++ <&nsscc 103>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++
++ port@5 {
++ reg = <5>;
++ phy-mode = "usxgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy4>;
++ pcs-handle = <&pcs1_mii0>;
++ clocks = <&nsscc 61>,
++ <&nsscc 62>,
++ <&nsscc 65>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 33>,
++ <&nsscc 104>,
++ <&nsscc 105>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++
++ port@6 {
++ reg = <6>;
++ phy-mode = "usxgmii";
++ managed = "in-band-status";
++ phy-handle = <&phy5>;
++ pcs-handle = <&pcs2_mii0>;
++ clocks = <&nsscc 68>,
++ <&nsscc 69>,
++ <&nsscc 72>;
++ clock-names = "mac",
++ "rx",
++ "tx";
++ resets = <&nsscc 34>,
++ <&nsscc 106>,
++ <&nsscc 107>;
++ reset-names = "mac",
++ "rx",
++ "tx";
++ };
++ };
++ };
--- /dev/null
+From 9973b6610830146af1a12fe02d2d6440eb80b0f9 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:36 +0800
+Subject: [PATCH] docs: networking: Add PPE driver documentation for Qualcomm
+ IPQ9574 SoC
+
+Add description and high-level diagram for PPE, driver overview and
+module enable/debug information.
+
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../device_drivers/ethernet/index.rst | 1 +
+ .../ethernet/qualcomm/ppe/ppe.rst | 197 ++++++++++++++++++
+ 2 files changed, 198 insertions(+)
+ create mode 100644 Documentation/networking/device_drivers/ethernet/qualcomm/ppe/ppe.rst
+
+--- a/Documentation/networking/device_drivers/ethernet/index.rst
++++ b/Documentation/networking/device_drivers/ethernet/index.rst
+@@ -49,6 +49,7 @@ Contents:
+ neterion/s2io
+ netronome/nfp
+ pensando/ionic
++ qualcomm/ppe/ppe
+ smsc/smc9
+ stmicro/stmmac
+ ti/cpsw
+--- /dev/null
++++ b/Documentation/networking/device_drivers/ethernet/qualcomm/ppe/ppe.rst
+@@ -0,0 +1,197 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++===============================================
++PPE Ethernet Driver for Qualcomm IPQ SoC Family
++===============================================
++
++Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++
++Author: Lei Wei <quic_leiwei@quicinc.com>
++
++
++Contents
++========
++
++- `PPE Overview`_
++- `PPE Driver Overview`_
++- `PPE Driver Supported SoCs`_
++- `Enabling the Driver`_
++- `Debugging`_
++
++
++PPE Overview
++============
++
++The IPQ (Qualcomm Internet Processor) series is Qualcomm's family of networking
++SoCs (System-on-Chip) for Wi-Fi access points. The PPE (Packet Process Engine) is
++the Ethernet packet processing engine in the IPQ SoC.
++
++Below is a simplified hardware diagram of the IPQ9574 SoC, showing the PPE engine
++and the other SoC blocks outside the PPE engine. These blocks work together to
++enable the Ethernet functionality of the IPQ SoC::
++
++ +------+ +------+ +------+ +------+ +------+ +------+ start +-------+
++ |netdev| |netdev| |netdev| |netdev| |netdev| |netdev|<------|PHYLINK|
++ +------+ +------+ +------+ +------+ +------+ +------+ stop +-+-+-+-+
++ | | | ^
++ +-------+ +-------------------------+--------+----------------------+ | | |
++ | GCC | | | EDMA | | | | |
++ +---+---+ | PPE +---+----+ | | | |
++ | clk | | | | | |
++ +------>| +-----------------------+------+-----+---------------+ | | | |
++ | | Switch Core |Port0 | |Port7(EIP FIFO)| | | | |
++ | | +---+--+ +------+--------+ | | | |
++ | | | | | | | | |
++ +-------+ | | +------+---------------+----+ | | | | |
++ |CMN PLL| | | +---+ +---+ +----+ | +--------+ | | | | | |
++ +---+---+ | | |BM | |QM | |SCH | | | L2/L3 | ....... | | | | | |
++ | | | | +---+ +---+ +----+ | +--------+ | | | | | |
++ | | | | +------+--------------------+ | | | | |
++ | | | | | | | | | |
++ | v | | +-----+-+-----+-+-----+-+-+---+--+-----+-+-----+ | | | | |
++ | +------+ | | |Port1| |Port2| |Port3| |Port4| |Port5| |Port6| | | | | |
++ | |NSSCC | | | +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ | | mac| | |
++ | +-+-+--+ | | |MAC0 | |MAC1 | |MAC2 | |MAC3 | |MAC4 | |MAC5 | | |<---+ | |
++ | ^ | |clk | | +-----+-+-----+-+-----+-+-----+--+-----+-+-----+ | | ops | |
++ | | | +---->| +----|------|-------|-------|---------|--------|-----+ | | |
++ | | | +---------------------------------------------------------+ | |
++ | | | | | | | | | | |
++ | | | MII clk | QSGMII USXGMII USXGMII | |
++ | | +------------->| | | | | | | |
++ | | +-------------------------+ +---------+ +---------+ | |
++ | |125/312.5M clk| (PCS0) | | (PCS1) | | (PCS2) | pcs ops | |
++ | +--------------+ UNIPHY0 | | UNIPHY1 | | UNIPHY2 |<--------+ |
++ +--------------->| | | | | | |
++ | 31.25M ref clk +-------------------------+ +---------+ +---------+ |
++ | | | | | | | |
++ | +-----------------------------------------------------+ |
++ |25/50M ref clk| +-------------------------+ +------+ +------+ | link |
++ +------------->| | QUAD PHY | | PHY4 | | PHY5 | |---------+
++ | +-------------------------+ +------+ +------+ | change
++ | |
++ | MDIO bus |
++ +-----------------------------------------------------+
++
++The CMN (Common) PLL, NSSCC (Networking Sub System Clock Controller) and GCC (Global
++Clock Controller) blocks are in the SoC and act as clock providers.
++
++The UNIPHY block is in the SoC and provides the PCS (Physical Coding Sublayer) and
++XPCS (10-Gigabit Physical Coding Sublayer) functions to support different interface
++modes between the PPE MAC and the external PHY.
++
++This document focuses on the description of the PPE engine and the PPE driver.
++
++The Ethernet functionality in the PPE (Packet Process Engine) consists of three
++components: the switch core, the port wrapper and the Ethernet DMA.
++
++The switch core in the IPQ9574 PPE has a maximum of 6 front panel ports and two FIFO
++interfaces. One of the two FIFO interfaces is used for Ethernet port to host CPU
++communication using the Ethernet DMA. The other is used for communicating with the
++EIP engine, which is used for IPsec offload. On the IPQ9574, the PPE includes 6
++GMACs/XGMACs that can be connected with external Ethernet PHYs. The switch core also
++includes the BM (Buffer Management), QM (Queue Management) and SCH (Scheduler)
++modules to support packet processing.
++
++The port wrapper provides connections from the 6 GMACs/XGMACs to UNIPHY (PCS) supporting
++various modes such as SGMII/QSGMII/PSGMII/USXGMII/10G-BASER. There are 3 UNIPHY (PCS)
++instances supported on the IPQ9574.
++
++Ethernet DMA is used to transmit and receive packets between the Ethernet subsystem
++and the ARM host CPU.
++
++The following lists the main blocks in the PPE engine that are driven by this
++PPE driver:
++
++- BM
++ BM is the hardware buffer manager for the PPE switch ports.
++- QM
++ Queue Manager for managing the egress hardware queues of the PPE switch ports.
++- SCH
++ The scheduler which manages the hardware traffic scheduling for the PPE switch ports.
++- L2
++ The L2 block performs the packet bridging in the switch core. The bridge domain is
++ represented by the VSI (Virtual Switch Instance) domain in PPE. FDB learning can be
++ enabled based on the VSI domain and bridge forwarding occurs within the VSI domain.
++- MAC
++ The PPE in the IPQ9574 supports up to six MACs (MAC0 to MAC5), which correspond
++ to the six switch ports (port1 to port6). The MAC block is connected with the
++ external PHY through the UNIPHY PCS block. Each MAC block includes the GMAC and
++ XGMAC blocks, and the switch port selects GMAC or XGMAC through a MUX according
++ to the external PHY's capability.
++- EDMA (Ethernet DMA)
++ The Ethernet DMA is used to transmit and receive Ethernet packets between the PPE
++ ports and the ARM cores.
++
++A packet received on a PPE MAC port can be forwarded to another PPE MAC port. It can
++also be forwarded to the internal switch port0, so that the packet can be delivered
++to the ARM cores using the Ethernet DMA (EDMA) engine. The Ethernet DMA driver then
++delivers the packet to the corresponding 'netdevice' interface.
++
++The software instantiations of the PPE MAC (netdevice), PCS and external PHYs
++interact with the Linux PHYLINK framework to manage the connectivity between the
++PPE ports and the connected PHYs, and the port link states, as illustrated in the
++diagram above.
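++
++The following is a minimal sketch of how a MAC driver typically plugs into the
++PHYLINK framework. It is illustrative only: the ppe_port structure, its fields
++and the callback bodies are hypothetical and do not correspond to the actual
++driver code::
++
++  static const struct phylink_mac_ops ppe_phylink_ops = {
++          .mac_config = ppe_mac_config,
++          .mac_link_up = ppe_mac_link_up,
++          .mac_link_down = ppe_mac_link_down,
++          .mac_select_pcs = ppe_mac_select_pcs,
++  };
++
++  /* Per-port setup: create a PHYLINK instance described by the port's
++   * device tree node, then attach the PHY given by the "phy-handle"
++   * property. PHYLINK afterwards invokes the MAC/PCS callbacks above
++   * on link changes.
++   */
++  static int ppe_port_phylink_setup(struct ppe_port *port,
++                                    struct net_device *netdev)
++  {
++          port->phylink_config.dev = &netdev->dev;
++          port->phylink_config.type = PHYLINK_NETDEV;
++
++          port->phylink = phylink_create(&port->phylink_config,
++                                         of_fwnode_handle(port->np),
++                                         port->interface, &ppe_phylink_ops);
++          if (IS_ERR(port->phylink))
++                  return PTR_ERR(port->phylink);
++
++          return phylink_of_phy_connect(port->phylink, port->np, 0);
++  }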
++
++
++PPE Driver Overview
++===================
++
++The PPE driver is the Ethernet driver for the Qualcomm IPQ SoC. It is a single platform
++driver which includes the PPE part and the Ethernet DMA part. The PPE part initializes
++and drives the various blocks in the PPE switch core, such as the BM/QM/L2 blocks and
++the PPE MACs. The EDMA part drives the Ethernet DMA for packet transfer between the PPE
++ports and the ARM cores, and provides the netdevice driver for the PPE ports.
++
++The PPE driver files in drivers/net/ethernet/qualcomm/ppe/ are listed below:
++
++- Makefile
++- ppe.c
++- ppe.h
++- ppe_config.c
++- ppe_config.h
++- ppe_debugfs.c
++- ppe_debugfs.h
++- ppe_regs.h
++
++The ppe.c file contains the main PPE platform driver and undertakes the initialization of
++PPE switch core blocks such as QM, BM and L2. The configuration APIs for these hardware
++blocks are provided in the ppe_config.c file.
++
++The ppe.h file defines the PPE device data structure used by the PPE driver functions.
++
++The ppe_debugfs.c file exposes the PPE statistics counters, such as the PPE port Rx
++and Tx counters, CPU code counters and queue counters, through debugfs.
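++
++As an illustration, such a debugfs entry is commonly registered as sketched below.
++This is a sketch only, under the assumption of a hypothetical ppe_debugfs_setup()
++entry point, debugfs_root field and show helper; it is not the actual driver code::
++
++  static int ppe_packet_counters_show(struct seq_file *seq, void *v)
++  {
++          /* Format the PPE port Rx/Tx, CPU code and queue counters. */
++          return 0;
++  }
++  DEFINE_SHOW_ATTRIBUTE(ppe_packet_counters);
++
++  void ppe_debugfs_setup(struct ppe_device *ppe_dev)
++  {
++          /* Creates /sys/kernel/debug/ppe/packet_counters. */
++          ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
++          debugfs_create_file("packet_counters", 0444, ppe_dev->debugfs_root,
++                              ppe_dev, &ppe_packet_counters_fops);
++  }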
++
++
++PPE Driver Supported SoCs
++=========================
++
++The PPE driver supports the following IPQ SoCs:
++
++- IPQ9574
++
++
++Enabling the Driver
++===================
++
++The driver is located in the menu structure at:
++
++ -> Device Drivers
++ -> Network device support (NETDEVICES [=y])
++ -> Ethernet driver support
++ -> Qualcomm devices
++ -> Qualcomm Technologies, Inc. PPE Ethernet support
++
++If this driver is built as a module, the following commands can be used to install and
++remove it:
++
++- insmod qcom-ppe.ko
++- rmmod qcom-ppe.ko
++
++The PPE driver functionally depends on the CMN PLL and NSSCC clock controller drivers.
++Please make sure the dependent modules are installed before installing the PPE driver
++module.
++
++
++Debugging
++=========
++
++The PPE hardware counters are available in the debugfs and can be checked by the command
++``cat /sys/kernel/debug/ppe/packet_counters``.
--- /dev/null
+From d1158f0282304c89217894aa346fc45364b95542 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:37 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Add PPE driver for IPQ9574 SoC
+
+The PPE (Packet Process Engine) hardware block is available
+on Qualcomm IPQ SoCs that support the PPE architecture, such as
+IPQ9574.
+
+The PPE in the IPQ9574 includes six integrated Ethernet MACs
+(for the 6 PPE ports), buffer management, queue management and
+scheduler functions. The MACs can connect with external
+PHY or switch devices using the UNIPHY PCS block available
+in the SoC.
+
+The PPE also includes various packet processing offload
+capabilities such as L3 routing and L2 bridging, VLAN and
+tunnel processing offload. It also includes an Ethernet DMA
+function for transferring packets between the ARM cores and
+the PPE Ethernet ports.
+
+This patch adds the base source files and Makefiles for
+the PPE driver, covering platform driver registration,
+clock initialization, and the PPE reset routine.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig | 15 ++
+ drivers/net/ethernet/qualcomm/Makefile | 1 +
+ drivers/net/ethernet/qualcomm/ppe/Makefile | 7 +
+ drivers/net/ethernet/qualcomm/ppe/ppe.c | 218 +++++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe.h | 36 ++++
+ 5 files changed, 277 insertions(+)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/Makefile
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.h
+
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -61,6 +61,21 @@ config QCOM_EMAC
+ low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+ Precision Clock Synchronization Protocol.
+
++config QCOM_PPE
++ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
++ depends on HAS_IOMEM && OF
++ depends on COMMON_CLK
++ select REGMAP_MMIO
++ help
++ This driver supports the Qualcomm Technologies, Inc. packet
++ process engine (PPE) available on IPQ SoCs. The PPE includes
++ the ethernet MACs, Ethernet DMA (EDMA) and switch core that
++ supports L3 flow offload, L2 switch function, RSS and tunnel
++ offload.
++
++ To compile this driver as a module, choose M here. The module
++ will be called qcom-ppe.
++
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+
+ endif # NET_VENDOR_QUALCOMM
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
+
+ obj-y += emac/
+
++obj-$(CONFIG_QCOM_PPE) += ppe/
+ obj-$(CONFIG_RMNET) += rmnet/
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
++#
++
++obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
++qcom-ppe-objs := ppe.o
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -0,0 +1,218 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE platform device probe, DTSI parser and PPE clock initializations. */
++
++#include <linux/clk.h>
++#include <linux/interconnect.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++
++#include "ppe.h"
++
++#define PPE_PORT_MAX 8
++#define PPE_CLK_RATE 353000000
++
++/* Interconnect (ICC) paths for enabling the PPE device. Entries whose
++ * avg_bw and peak_bw are 0 are updated at probe time from the PPE
++ * clock rate.
++ */
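++/* For example, with the 353 MHz PPE clock rate used below, such entries
++ * are programmed as Bps_to_icc(353000000) = 353000 kBps.
++ */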
++static const struct icc_bulk_data ppe_icc_data[] = {
++ {
++ .name = "ppe",
++ .avg_bw = 0,
++ .peak_bw = 0,
++ },
++ {
++ .name = "ppe_cfg",
++ .avg_bw = 0,
++ .peak_bw = 0,
++ },
++ {
++ .name = "qos_gen",
++ .avg_bw = 6000,
++ .peak_bw = 6000,
++ },
++ {
++ .name = "timeout_ref",
++ .avg_bw = 6000,
++ .peak_bw = 6000,
++ },
++ {
++ .name = "nssnoc_memnoc",
++ .avg_bw = 533333,
++ .peak_bw = 533333,
++ },
++ {
++ .name = "memnoc_nssnoc",
++ .avg_bw = 533333,
++ .peak_bw = 533333,
++ },
++ {
++ .name = "memnoc_nssnoc_1",
++ .avg_bw = 533333,
++ .peak_bw = 533333,
++ },
++};
++
++static const struct regmap_range ppe_readable_ranges[] = {
++ regmap_reg_range(0x0, 0x1ff), /* Global */
++ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
++ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
++ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
++ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
++ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
++ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
++ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
++ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
++ regmap_reg_range(0xf000, 0x1efff), /* IPE */
++ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
++ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
++ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
++ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
++ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
++ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
++ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
++ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
++ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
++ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
++ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
++ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
++ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
++ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
++ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
++ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
++ regmap_reg_range(0x600000, 0x6fffff), /* BM */
++ regmap_reg_range(0x800000, 0x9fffff), /* QM */
++ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
++};
++
++static const struct regmap_access_table ppe_reg_table = {
++ .yes_ranges = ppe_readable_ranges,
++ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
++};
++
++static const struct regmap_config regmap_config_ipq9574 = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++ .rd_table = &ppe_reg_table,
++ .wr_table = &ppe_reg_table,
++ .max_register = 0xbef800,
++ .fast_io = true,
++};
++
++static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
++{
++ unsigned long ppe_rate = ppe_dev->clk_rate;
++ struct device *dev = ppe_dev->dev;
++ struct reset_control *rstc;
++ struct clk_bulk_data *clks;
++ struct clk *clk;
++ int ret, i;
++
++ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
++ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
++ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
++ Bps_to_icc(ppe_rate);
++ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
++ Bps_to_icc(ppe_rate);
++ }
++
++ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
++ ppe_dev->icc_paths);
++ if (ret)
++ return ret;
++
++ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
++ if (ret)
++ return ret;
++
++ /* The PPE clocks have a common parent clock. Setting the clock
++ * rate of "ppe" ensures the clock rate of all PPE clocks is
++ * configured to the same rate.
++ */
++ clk = devm_clk_get(dev, "ppe");
++ if (IS_ERR(clk))
++ return PTR_ERR(clk);
++
++ ret = clk_set_rate(clk, ppe_rate);
++ if (ret)
++ return ret;
++
++ ret = devm_clk_bulk_get_all_enable(dev, &clks);
++ if (ret < 0)
++ return ret;
++
++ /* Reset the PPE. */
++ rstc = devm_reset_control_get_exclusive(dev, NULL);
++ if (IS_ERR(rstc))
++ return PTR_ERR(rstc);
++
++ ret = reset_control_assert(rstc);
++ if (ret)
++ return ret;
++
++ /* A 10 ms delay in the asserted state is necessary to reset the PPE. */
++ usleep_range(10000, 11000);
++
++ return reset_control_deassert(rstc);
++}
++
++static int qcom_ppe_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct ppe_device *ppe_dev;
++ void __iomem *base;
++ int ret, num_icc;
++
++ num_icc = ARRAY_SIZE(ppe_icc_data);
++ ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
++ GFP_KERNEL);
++ if (!ppe_dev)
++ return -ENOMEM;
++
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
++
++ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
++ if (IS_ERR(ppe_dev->regmap))
++ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
++ "PPE initialize regmap failed\n");
++ ppe_dev->dev = dev;
++ ppe_dev->clk_rate = PPE_CLK_RATE;
++ ppe_dev->num_ports = PPE_PORT_MAX;
++ ppe_dev->num_icc_paths = num_icc;
++
++ ret = ppe_clock_init_and_reset(ppe_dev);
++ if (ret)
++ return dev_err_probe(dev, ret, "PPE clock config failed\n");
++
++ platform_set_drvdata(pdev, ppe_dev);
++
++ return 0;
++}
++
++static const struct of_device_id qcom_ppe_of_match[] = {
++ { .compatible = "qcom,ipq9574-ppe" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
++
++static struct platform_driver qcom_ppe_driver = {
++ .driver = {
++ .name = "qcom_ppe",
++ .of_match_table = qcom_ppe_of_match,
++ },
++ .probe = qcom_ppe_probe,
++};
++module_platform_driver(qcom_ppe_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __PPE_H__
++#define __PPE_H__
++
++#include <linux/compiler.h>
++#include <linux/interconnect.h>
++
++struct device;
++struct regmap;
++
++/**
++ * struct ppe_device - PPE device private data.
++ * @dev: PPE device structure.
++ * @regmap: PPE register map.
++ * @clk_rate: PPE clock rate.
++ * @num_ports: Number of PPE ports.
++ * @num_icc_paths: Number of interconnect paths.
++ * @icc_paths: Interconnect path array.
++ *
++ * PPE device is the instance of PPE hardware, which is used to
++ * configure PPE packet process modules such as BM (buffer management),
++ * QM (queue management), and scheduler.
++ */
++struct ppe_device {
++ struct device *dev;
++ struct regmap *regmap;
++ unsigned long clk_rate;
++ unsigned int num_ports;
++ unsigned int num_icc_paths;
++ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
++};
++#endif
--- /dev/null
+From 6e639ab45348ee7a697db8b481fa6f8555280f58 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:38 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE buffer management for
+ IPQ9574
+
+The BM (Buffer Management) config controls the pause frames generated
+on the PPE port. There are a maximum of 15 BM ports and 4 groups
+supported; all BM ports are assigned to group 0 by default. The number
+of hardware buffers configured for a port influences the flow control
+threshold for that port.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe.c | 5 +
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 195 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 12 ++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 59 ++++++
+ 5 files changed, 272 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o
++qcom-ppe-objs := ppe.o ppe_config.o
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -15,6 +15,7 @@
+ #include <linux/reset.h>
+
+ #include "ppe.h"
++#include "ppe_config.h"
+
+ #define PPE_PORT_MAX 8
+ #define PPE_CLK_RATE 353000000
+@@ -194,6 +195,10 @@ static int qcom_ppe_probe(struct platfor
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
+
++ ret = ppe_hw_config(ppe_dev);
++ if (ret)
++ return dev_err_probe(dev, ret, "PPE HW config failed\n");
++
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -0,0 +1,195 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE HW initialization configs such as BM (buffer management),
++ * QM (queue management) and scheduler configs.
++ */
++
++#include <linux/bitfield.h>
++#include <linux/bits.h>
++#include <linux/device.h>
++#include <linux/regmap.h>
++
++#include "ppe.h"
++#include "ppe_config.h"
++#include "ppe_regs.h"
++
++/**
++ * struct ppe_bm_port_config - PPE BM port configuration.
++ * @port_id_start: The first BM port ID to configure.
++ * @port_id_end: The last BM port ID to configure.
++ * @pre_alloc: BM port dedicated buffer number.
++ * @in_fly_buf: Buffer number for receiving the packet after pause frame sent.
++ * @ceil: Ceil to generate the back pressure.
++ * @weight: Weight value.
++ * @resume_offset: Resume offset from the threshold value.
++ * @resume_ceil: Ceil to resume from the back pressure state.
++ * @dynamic: Dynamic threshold used or not.
++ *
++ * This is for configuring the threshold that impacts the port
++ * flow control.
++ */
++struct ppe_bm_port_config {
++ unsigned int port_id_start;
++ unsigned int port_id_end;
++ unsigned int pre_alloc;
++ unsigned int in_fly_buf;
++ unsigned int ceil;
++ unsigned int weight;
++ unsigned int resume_offset;
++ unsigned int resume_ceil;
++ bool dynamic;
++};
++
++/* Assign the shared buffer number 1550 to group 0 by default. */
++static const int ipq9574_ppe_bm_group_config = 1550;
++
++/* The buffer configurations per PPE port. There are 15 BM ports and
++ * 4 BM groups supported by PPE. BM port (0-7) is for EDMA port 0,
++ * BM port (8-13) is for PPE physical port 1-6 and BM port 14 is for
++ * EIP port.
++ */
++static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
++ {
++ /* Buffer configuration for the BM port ID 0 of EDMA. */
++ .port_id_start = 0,
++ .port_id_end = 0,
++ .pre_alloc = 0,
++ .in_fly_buf = 100,
++ .ceil = 1146,
++ .weight = 7,
++ .resume_offset = 8,
++ .resume_ceil = 0,
++ .dynamic = true,
++ },
++ {
++ /* Buffer configuration for the BM port ID 1-7 of EDMA. */
++ .port_id_start = 1,
++ .port_id_end = 7,
++ .pre_alloc = 0,
++ .in_fly_buf = 100,
++ .ceil = 250,
++ .weight = 4,
++ .resume_offset = 36,
++ .resume_ceil = 0,
++ .dynamic = true,
++ },
++ {
++ /* Buffer configuration for the BM port ID 8-13 of PPE ports. */
++ .port_id_start = 8,
++ .port_id_end = 13,
++ .pre_alloc = 0,
++ .in_fly_buf = 128,
++ .ceil = 250,
++ .weight = 4,
++ .resume_offset = 36,
++ .resume_ceil = 0,
++ .dynamic = true,
++ },
++ {
++ /* Buffer configuration for the BM port ID 14 of EIP. */
++ .port_id_start = 14,
++ .port_id_end = 14,
++ .pre_alloc = 0,
++ .in_fly_buf = 40,
++ .ceil = 250,
++ .weight = 4,
++ .resume_offset = 36,
++ .resume_ceil = 0,
++ .dynamic = true,
++ },
++};
++
++static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
++ const struct ppe_bm_port_config port_cfg)
++{
++ u32 reg, val, bm_fc_val[2];
++ int ret;
++
++ reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ bm_fc_val, ARRAY_SIZE(bm_fc_val));
++ if (ret)
++ return ret;
++
++ /* Configure BM flow control related threshold. */
++ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
++ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
++ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
++ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
++ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
++ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
++
++ /* Configure low/high bits of the ceiling for the BM port. */
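++ /* For example, a ceiling of 1146 (0b100_0111_1010) is stored as
++ * CEILING_LOW = 2 (bits 2:0) and CEILING_HIGH = 143 (bits 10:3),
++ * since 143 * 8 + 2 = 1146.
++ */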
++ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
++ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
++ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
++ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ bm_fc_val, ARRAY_SIZE(bm_fc_val));
++ if (ret)
++ return ret;
++
++ /* Assign the default group ID 0 to the BM port. */
++ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
++ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
++ ret = regmap_update_bits(ppe_dev->regmap, reg,
++ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
++ val);
++ if (ret)
++ return ret;
++
++ /* Enable BM port flow control. */
++ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
++
++ return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
++}
++
++/* Configure the buffer threshold for the port flow control function. */
++static int ppe_config_bm(struct ppe_device *ppe_dev)
++{
++ const struct ppe_bm_port_config *port_cfg;
++ unsigned int i, bm_port_id, port_cfg_cnt;
++ u32 reg, val;
++ int ret;
++
++ /* Configure the allocated buffer number only for group 0.
++ * The buffer number of group 1-3 is already cleared to 0
++ * after PPE reset during the probe of PPE driver.
++ */
++ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
++ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
++ ipq9574_ppe_bm_group_config);
++ ret = regmap_update_bits(ppe_dev->regmap, reg,
++ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
++ val);
++ if (ret)
++ goto bm_config_fail;
++
++ /* Configure buffer thresholds for the BM ports. */
++ port_cfg = ipq9574_ppe_bm_port_config;
++ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
++ for (i = 0; i < port_cfg_cnt; i++) {
++ for (bm_port_id = port_cfg[i].port_id_start;
++ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
++ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
++ port_cfg[i]);
++ if (ret)
++ goto bm_config_fail;
++ }
++ }
++
++ return 0;
++
++bm_config_fail:
++ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
++ return ret;
++}
++
++int ppe_hw_config(struct ppe_device *ppe_dev)
++{
++ return ppe_config_bm(ppe_dev);
++}
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __PPE_CONFIG_H__
++#define __PPE_CONFIG_H__
++
++#include "ppe.h"
++
++int ppe_hw_config(struct ppe_device *ppe_dev);
++#endif
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -0,0 +1,59 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE hardware register and table declarations. */
++#ifndef __PPE_REGS_H__
++#define __PPE_REGS_H__
++
++#include <linux/bitfield.h>
++
++/* There are 15 BM ports and 4 BM groups supported by PPE.
++ * BM port (0-7) is for EDMA port 0, BM port (8-13) is for
++ * PPE physical port 1-6 and BM port 14 is for EIP port.
++ */
++#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
++#define PPE_BM_PORT_FC_MODE_ENTRIES 15
++#define PPE_BM_PORT_FC_MODE_INC 0x4
++#define PPE_BM_PORT_FC_MODE_EN BIT(0)
++
++#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
++#define PPE_BM_PORT_GROUP_ID_ENTRIES 15
++#define PPE_BM_PORT_GROUP_ID_INC 0x4
++#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
++
++#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
++#define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
++#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
++#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
++
++#define PPE_BM_PORT_FC_CFG_TBL_ADDR 0x601000
++#define PPE_BM_PORT_FC_CFG_TBL_ENTRIES 15
++#define PPE_BM_PORT_FC_CFG_TBL_INC 0x10
++#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
++#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
++#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
++#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
++#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
++#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
++#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
++#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
++
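++/* Each flow-control config table entry spans two 32-bit words; the _W0_
++ * and _W1_ prefixes above give the word index, which is why the W1
++ * setters below operate on (u32 *)(tbl_cfg) + 0x1.
++ */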
++#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_REACT_LIMIT)
++#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_THRESHOLD)
++#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_OFFSET)
++#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_CEILING_LOW)
++#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_CEILING_HIGH)
++#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_WEIGHT)
++#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_DYNAMIC)
++#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
++#endif
--- /dev/null
+From 9be6c3590ef3c241e6a3cfd05291304a1f973bcf Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:39 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE queue management for
+ IPQ9574
+
+QM (queue management) configurations decide the length of the PPE
+queues and the queue depth for these queues, which are used to
+drop packets in the event of congestion.
+
+There are two types of PPE queues - unicast queues (0-255) and
+multicast queues (256-299). These queue types are used to forward
+different types of traffic, and are configured with different
+lengths.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 177 +++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 85 +++++++++
+ 2 files changed, 261 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -43,6 +43,29 @@ struct ppe_bm_port_config {
+ bool dynamic;
+ };
+
++/**
++ * struct ppe_qm_queue_config - PPE queue config.
++ * @queue_start: PPE start of queue ID.
++ * @queue_end: PPE end of queue ID.
++ * @prealloc_buf: Queue dedicated buffer number.
++ * @ceil: Ceil to start drop packet from queue.
++ * @weight: Weight value.
++ * @resume_offset: Resume offset from the threshold.
++ * @dynamic: Threshold value is decided dynamically or statically.
++ *
++ * Queue configuration decides the threshold to drop packet from PPE
++ * hardware queue.
++ */
++struct ppe_qm_queue_config {
++ unsigned int queue_start;
++ unsigned int queue_end;
++ unsigned int prealloc_buf;
++ unsigned int ceil;
++ unsigned int weight;
++ unsigned int resume_offset;
++ bool dynamic;
++};
++
+ /* Assign the shared buffer number 1550 to group 0 by default. */
+ static const int ipq9574_ppe_bm_group_config = 1550;
+
+@@ -102,6 +125,33 @@ static const struct ppe_bm_port_config i
+ },
+ };
+
++/* Default QM group settings for IPQ9574. */
++static const int ipq9574_ppe_qm_group_config = 2000;
++
++/* Default QM settings for unicast and multicast queues for IPQ9574. */
++static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
++ {
++ /* QM settings for unicast queues 0 to 255. */
++ .queue_start = 0,
++ .queue_end = 255,
++ .prealloc_buf = 0,
++ .ceil = 1200,
++ .weight = 7,
++ .resume_offset = 36,
++ .dynamic = true,
++ },
++ {
++ /* QM settings for multicast queues 256 to 299. */
++ .queue_start = 256,
++ .queue_end = 299,
++ .prealloc_buf = 0,
++ .ceil = 250,
++ .weight = 0,
++ .resume_offset = 36,
++ .dynamic = false,
++ },
++};
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -189,7 +239,132 @@ bm_config_fail:
+ return ret;
+ }
+
++/* Configure the PPE hardware queue depth, which is decided by the
++ * queue threshold.
++ */
++static int ppe_config_qm(struct ppe_device *ppe_dev)
++{
++ const struct ppe_qm_queue_config *queue_cfg;
++ int ret, i, queue_id, queue_cfg_count;
++ u32 reg, multicast_queue_cfg[5];
++ u32 unicast_queue_cfg[4];
++ u32 group_cfg[3];
++
++ /* Assign the buffer number to the group 0 by default. */
++ reg = PPE_AC_GRP_CFG_TBL_ADDR;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ group_cfg, ARRAY_SIZE(group_cfg));
++ if (ret)
++ goto qm_config_fail;
++
++ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ group_cfg, ARRAY_SIZE(group_cfg));
++ if (ret)
++ goto qm_config_fail;
++
++ queue_cfg = ipq9574_ppe_qm_queue_config;
++ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
++ for (i = 0; i < queue_cfg_count; i++) {
++ queue_id = queue_cfg[i].queue_start;
++
++ /* Configure threshold for dropping packets separately for
++ * unicast and multicast PPE queues.
++ */
++ while (queue_id <= queue_cfg[i].queue_end) {
++ if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
++ reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
++ PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
++
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ unicast_queue_cfg,
++ ARRAY_SIZE(unicast_queue_cfg));
++ if (ret)
++ goto qm_config_fail;
++
++ PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
++ PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
++ PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
++ queue_cfg[i].prealloc_buf);
++ PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
++ queue_cfg[i].dynamic);
++ PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
++ queue_cfg[i].weight);
++ PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
++ queue_cfg[i].ceil);
++ PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
++ queue_cfg[i].resume_offset);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ unicast_queue_cfg,
++ ARRAY_SIZE(unicast_queue_cfg));
++ if (ret)
++ goto qm_config_fail;
++ } else {
++ reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
++ PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
++
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ multicast_queue_cfg,
++ ARRAY_SIZE(multicast_queue_cfg));
++ if (ret)
++ goto qm_config_fail;
++
++ PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
++ PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
++ PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
++ queue_cfg[i].prealloc_buf);
++ PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
++ queue_cfg[i].ceil);
++ PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
++ queue_cfg[i].resume_offset);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ multicast_queue_cfg,
++ ARRAY_SIZE(multicast_queue_cfg));
++ if (ret)
++ goto qm_config_fail;
++ }
++
++ /* Enable enqueue. */
++ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
++ ret = regmap_clear_bits(ppe_dev->regmap, reg,
++ PPE_ENQ_OPR_TBL_ENQ_DISABLE);
++ if (ret)
++ goto qm_config_fail;
++
++ /* Enable dequeue. */
++ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
++ ret = regmap_clear_bits(ppe_dev->regmap, reg,
++ PPE_DEQ_OPR_TBL_DEQ_DISABLE);
++ if (ret)
++ goto qm_config_fail;
++
++ queue_id++;
++ }
++ }
++
++ /* Enable queue counter for all PPE hardware queues. */
++ ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
++ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
++ if (ret)
++ goto qm_config_fail;
++
++ return 0;
++
++qm_config_fail:
++ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
++ return ret;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+- return ppe_config_bm(ppe_dev);
++ int ret;
++
++ ret = ppe_config_bm(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_config_qm(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -9,6 +9,16 @@
+
+ #include <linux/bitfield.h>
+
++/* PPE queue counters enable/disable control. */
++#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
++#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
++
++/* Table addresses for per-queue dequeue setting. */
++#define PPE_DEQ_OPR_TBL_ADDR 0x430000
++#define PPE_DEQ_OPR_TBL_ENTRIES 300
++#define PPE_DEQ_OPR_TBL_INC 0x10
++#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
++
+ /* There are 15 BM ports and 4 BM groups supported by PPE.
+ * BM port (0-7) is for EDMA port 0, BM port (8-13) is for
+ * PPE physical port 1-6 and BM port 14 is for EIP port.
+@@ -56,4 +66,79 @@
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_DYNAMIC)
+ #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
++
++/* PPE unicast queue (0-255) configurations. */
++#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
++#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
++#define PPE_AC_UNICAST_QUEUE_CFG_TBL_INC 0x10
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_EN BIT(0)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_WRED_EN BIT(1)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_FC_EN BIT(2)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_CLR_AWARE BIT(3)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC BIT(17)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
++#define PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
++#define PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
++
++#define PPE_AC_UNICAST_QUEUE_SET_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_EN)
++#define PPE_AC_UNICAST_QUEUE_SET_GRP_ID(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID)
++#define PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT)
++#define PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC)
++#define PPE_AC_UNICAST_QUEUE_SET_WEIGHT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT)
++#define PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD)
++#define PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x3, value, PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME)
++
++/* PPE multicast queue (256-299) configurations. */
++#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR 0x84a000
++#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ENTRIES 44
++#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC 0x10
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_EN BIT(0)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_FC_EN BIT(1)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_CLR_AWARE BIT(2)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
++#define PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
++
++#define PPE_AC_MULTICAST_QUEUE_SET_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MULTICAST_QUEUE_CFG_W0_EN)
++#define PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID)
++#define PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT)
++#define PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD)
++#define PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x2, value, PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME)
++
++/* PPE admission control group (0-3) configurations */
++#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
++#define PPE_AC_GRP_CFG_TBL_ENTRIES 0x4
++#define PPE_AC_GRP_CFG_TBL_INC 0x10
++#define PPE_AC_GRP_W0_AC_EN BIT(0)
++#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
++#define PPE_AC_GRP_W0_CLR_AWARE BIT(2)
++#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
++#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
++#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
++#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
++#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
++
++#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
++
++/* Table addresses for per-queue enqueue setting. */
++#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
++#define PPE_ENQ_OPR_TBL_ENTRIES 300
++#define PPE_ENQ_OPR_TBL_INC 0x10
++#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
+ #endif
--- /dev/null
+From 333edaf474cd707b0a04c57f255b56bc3c015789 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:40 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize the PPE scheduler
+ settings
+
+The PPE scheduler settings determine the priority of packet
+scheduling across the different hardware queues of each PPE port.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 788 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 37 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 97 +++
+ 3 files changed, 921 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -16,6 +16,8 @@
+ #include "ppe_config.h"
+ #include "ppe_regs.h"
+
++#define PPE_QUEUE_SCH_PRI_NUM 8
++
+ /**
+ * struct ppe_bm_port_config - PPE BM port configuration.
+ * @port_id_start: The first BM port ID to configure.
+@@ -66,6 +68,66 @@ struct ppe_qm_queue_config {
+ bool dynamic;
+ };
+
++/**
++ * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
++ * @valid: Arbitration entry valid or not.
++ * @is_egress: Arbitration entry for egress or not.
++ * @port: Port ID to use arbitration entry.
++ * @second_valid: Second port valid or not.
++ * @second_port: Second port to use.
++ *
++ * Configure the scheduler settings for accessing and releasing the PPE buffers.
++ */
++struct ppe_scheduler_bm_config {
++ bool valid;
++ bool is_egress;
++ unsigned int port;
++ bool second_valid;
++ unsigned int second_port;
++};
++
++/**
++ * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
++ * @ensch_port_bmp: Port bit map for enqueue scheduler.
++ * @ensch_port: Port ID to enqueue scheduler.
++ * @desch_port: Port ID to dequeue scheduler.
++ * @desch_second_valid: Dequeue for the second port valid or not.
++ * @desch_second_port: Second port ID to dequeue scheduler.
++ *
++ * Configure the scheduler settings for enqueuing and dequeuing packets on
++ * the PPE port.
++ */
++struct ppe_scheduler_qm_config {
++ unsigned int ensch_port_bmp;
++ unsigned int ensch_port;
++ unsigned int desch_port;
++ bool desch_second_valid;
++ unsigned int desch_second_port;
++};
++
++/**
++ * struct ppe_scheduler_port_config - PPE port scheduler config.
++ * @port: Port ID to be scheduled.
++ * @flow_level: Scheduler flow level or not.
++ * @node_id: Node ID; the queue ID is used for level 0.
++ * @loop_num: Number of nodes configured in a loop.
++ * @pri_max: Max priority configured.
++ * @flow_id: Strict priority ID.
++ * @drr_node_id: Node ID for scheduler.
++ *
++ * PPE port scheduler configuration which decides the priority in the
++ * packet scheduler for the egress port.
++ */
++struct ppe_scheduler_port_config {
++ unsigned int port;
++ bool flow_level;
++ unsigned int node_id;
++ unsigned int loop_num;
++ unsigned int pri_max;
++ unsigned int flow_id;
++ unsigned int drr_node_id;
++};
++
+ /* Assign the shared buffer number 1550 to group 0 by default. */
+ static const int ipq9574_ppe_bm_group_config = 1550;
+
+@@ -152,6 +214,599 @@ static const struct ppe_qm_queue_config
+ },
+ };
+
++/* Scheduler configuration for assigning and releasing buffers for packets
++ * passing through the PPE, which differs per SoC.
++ */
++static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 7, 0, 0},
++ {1, 1, 7, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 2, 0, 0},
++ {1, 1, 2, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 3, 0, 0},
++ {1, 1, 3, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 7, 0, 0},
++ {1, 1, 7, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 4, 0, 0},
++ {1, 1, 4, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 2, 0, 0},
++ {1, 1, 2, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 7, 0, 0},
++ {1, 1, 7, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 3, 0, 0},
++ {1, 1, 3, 0, 0},
++ {1, 0, 1, 0, 0},
++ {1, 1, 1, 0, 0},
++ {1, 0, 0, 0, 0},
++ {1, 1, 0, 0, 0},
++ {1, 0, 5, 0, 0},
++ {1, 1, 5, 0, 0},
++ {1, 0, 6, 0, 0},
++ {1, 1, 6, 0, 0},
++ {1, 0, 4, 0, 0},
++ {1, 1, 4, 0, 0},
++ {1, 0, 7, 0, 0},
++ {1, 1, 7, 0, 0},
++};
++
++/* Scheduler configuration for dispatching packets on PPE queues, which
++ * differs per SoC.
++ */
++static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
++ {0x98, 6, 0, 1, 1},
++ {0x94, 5, 6, 1, 3},
++ {0x86, 0, 5, 1, 4},
++ {0x8C, 1, 6, 1, 0},
++ {0x1C, 7, 5, 1, 1},
++ {0x98, 2, 6, 1, 0},
++ {0x1C, 5, 7, 1, 1},
++ {0x34, 3, 6, 1, 0},
++ {0x8C, 4, 5, 1, 1},
++ {0x98, 2, 6, 1, 0},
++ {0x8C, 5, 4, 1, 1},
++ {0xA8, 0, 6, 1, 2},
++ {0x98, 5, 1, 1, 0},
++ {0x98, 6, 5, 1, 2},
++ {0x89, 1, 6, 1, 4},
++ {0xA4, 3, 0, 1, 1},
++ {0x8C, 5, 6, 1, 4},
++ {0xA8, 0, 2, 1, 1},
++ {0x98, 6, 5, 1, 0},
++ {0xC4, 4, 3, 1, 1},
++ {0x94, 6, 5, 1, 0},
++ {0x1C, 7, 6, 1, 1},
++ {0x98, 2, 5, 1, 0},
++ {0x1C, 6, 7, 1, 1},
++ {0x1C, 5, 6, 1, 0},
++ {0x94, 3, 5, 1, 1},
++ {0x8C, 4, 6, 1, 0},
++ {0x94, 1, 5, 1, 3},
++ {0x94, 6, 1, 1, 0},
++ {0xD0, 3, 5, 1, 2},
++ {0x98, 6, 0, 1, 1},
++ {0x94, 5, 6, 1, 3},
++ {0x94, 1, 5, 1, 0},
++ {0x98, 2, 6, 1, 1},
++ {0x8C, 4, 5, 1, 0},
++ {0x1C, 7, 6, 1, 1},
++ {0x8C, 0, 5, 1, 4},
++ {0x89, 1, 6, 1, 2},
++ {0x98, 5, 0, 1, 1},
++ {0x94, 6, 5, 1, 3},
++ {0x92, 0, 6, 1, 2},
++ {0x98, 1, 5, 1, 0},
++ {0x98, 6, 2, 1, 1},
++ {0xD0, 0, 5, 1, 3},
++ {0x94, 6, 0, 1, 1},
++ {0x8C, 5, 6, 1, 4},
++ {0x8C, 1, 5, 1, 0},
++ {0x1C, 6, 7, 1, 1},
++ {0x1C, 5, 6, 1, 0},
++ {0xB0, 2, 3, 1, 1},
++ {0xC4, 4, 5, 1, 0},
++ {0x8C, 6, 4, 1, 1},
++ {0xA4, 3, 6, 1, 0},
++ {0x1C, 5, 7, 1, 1},
++ {0x4C, 0, 5, 1, 4},
++ {0x8C, 6, 0, 1, 1},
++ {0x34, 7, 6, 1, 3},
++ {0x94, 5, 0, 1, 1},
++ {0x98, 6, 5, 1, 2},
++};
++
++static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
++ {
++ .port = 0,
++ .flow_level = true,
++ .node_id = 0,
++ .loop_num = 1,
++ .pri_max = 1,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 0,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 8,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 16,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 24,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 32,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 40,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 48,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 56,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 256,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 0,
++ .flow_level = false,
++ .node_id = 264,
++ .loop_num = 8,
++ .pri_max = 8,
++ .flow_id = 0,
++ .drr_node_id = 0,
++ },
++ {
++ .port = 1,
++ .flow_level = true,
++ .node_id = 36,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 1,
++ .drr_node_id = 8,
++ },
++ {
++ .port = 1,
++ .flow_level = false,
++ .node_id = 144,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 36,
++ .drr_node_id = 48,
++ },
++ {
++ .port = 1,
++ .flow_level = false,
++ .node_id = 272,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 36,
++ .drr_node_id = 48,
++ },
++ {
++ .port = 2,
++ .flow_level = true,
++ .node_id = 40,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 2,
++ .drr_node_id = 12,
++ },
++ {
++ .port = 2,
++ .flow_level = false,
++ .node_id = 160,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 40,
++ .drr_node_id = 64,
++ },
++ {
++ .port = 2,
++ .flow_level = false,
++ .node_id = 276,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 40,
++ .drr_node_id = 64,
++ },
++ {
++ .port = 3,
++ .flow_level = true,
++ .node_id = 44,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 3,
++ .drr_node_id = 16,
++ },
++ {
++ .port = 3,
++ .flow_level = false,
++ .node_id = 176,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 44,
++ .drr_node_id = 80,
++ },
++ {
++ .port = 3,
++ .flow_level = false,
++ .node_id = 280,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 44,
++ .drr_node_id = 80,
++ },
++ {
++ .port = 4,
++ .flow_level = true,
++ .node_id = 48,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 4,
++ .drr_node_id = 20,
++ },
++ {
++ .port = 4,
++ .flow_level = false,
++ .node_id = 192,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 48,
++ .drr_node_id = 96,
++ },
++ {
++ .port = 4,
++ .flow_level = false,
++ .node_id = 284,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 48,
++ .drr_node_id = 96,
++ },
++ {
++ .port = 5,
++ .flow_level = true,
++ .node_id = 52,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 5,
++ .drr_node_id = 24,
++ },
++ {
++ .port = 5,
++ .flow_level = false,
++ .node_id = 208,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 52,
++ .drr_node_id = 112,
++ },
++ {
++ .port = 5,
++ .flow_level = false,
++ .node_id = 288,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 52,
++ .drr_node_id = 112,
++ },
++ {
++ .port = 6,
++ .flow_level = true,
++ .node_id = 56,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 6,
++ .drr_node_id = 28,
++ },
++ {
++ .port = 6,
++ .flow_level = false,
++ .node_id = 224,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 56,
++ .drr_node_id = 128,
++ },
++ {
++ .port = 6,
++ .flow_level = false,
++ .node_id = 292,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 56,
++ .drr_node_id = 128,
++ },
++ {
++ .port = 7,
++ .flow_level = true,
++ .node_id = 60,
++ .loop_num = 2,
++ .pri_max = 0,
++ .flow_id = 7,
++ .drr_node_id = 32,
++ },
++ {
++ .port = 7,
++ .flow_level = false,
++ .node_id = 240,
++ .loop_num = 16,
++ .pri_max = 8,
++ .flow_id = 60,
++ .drr_node_id = 144,
++ },
++ {
++ .port = 7,
++ .flow_level = false,
++ .node_id = 296,
++ .loop_num = 4,
++ .pri_max = 4,
++ .flow_id = 60,
++ .drr_node_id = 144,
++ },
++};
++
++/* Set the PPE queue level scheduler configuration. */
++static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
++ int node_id, int port,
++ struct ppe_scheduler_cfg scheduler_cfg)
++{
++ u32 val, reg;
++ int ret;
++
++ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
++ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
++ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
++ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
++ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
++ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
++ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++ PPE_L0_C_FLOW_CFG_TBL_INC;
++ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
++ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++ PPE_L0_E_FLOW_CFG_TBL_INC;
++ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
++ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
++ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
++
++ return regmap_update_bits(ppe_dev->regmap, reg,
++ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
++ val);
++}
++
++/* Set the PPE flow level scheduler configuration. */
++static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
++ int node_id, int port,
++ struct ppe_scheduler_cfg scheduler_cfg)
++{
++ u32 val, reg;
++ int ret;
++
++ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
++ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
++ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
++ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
++ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
++ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
++ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
++ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++ PPE_L1_C_FLOW_CFG_TBL_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
++ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
++ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++ PPE_L1_E_FLOW_CFG_TBL_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
++ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
++ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
++
++ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
++}
++
++/**
++ * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
++ * @ppe_dev: PPE device
++ * @node_id: PPE queue ID or flow ID
++ * @flow_level: Flow level scheduler or queue level scheduler
++ * @port: PPE port ID to set the scheduler configuration for
++ * @scheduler_cfg: PPE scheduler configuration
++ *
++ * The PPE scheduler supports queue level and flow level configuration on
++ * the PPE egress port.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
++ int node_id, bool flow_level, int port,
++ struct ppe_scheduler_cfg scheduler_cfg)
++{
++ if (flow_level)
++ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
++ port, scheduler_cfg);
++
++ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
++ port, scheduler_cfg);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -358,6 +1013,133 @@ qm_config_fail:
+ return ret;
+ }
+
++static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
++ const struct ppe_scheduler_port_config config)
++{
++ struct ppe_scheduler_cfg sch_cfg;
++ int ret, i;
++
++ for (i = 0; i < config.loop_num; i++) {
++ if (!config.pri_max) {
++ /* Round robin scheduler without priority. */
++ sch_cfg.flow_id = config.flow_id;
++ sch_cfg.pri = 0;
++ sch_cfg.drr_node_id = config.drr_node_id;
++ } else {
++ sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
++ sch_cfg.pri = i % config.pri_max;
++ sch_cfg.drr_node_id = config.drr_node_id + i;
++ }
++
++ /* The scheduler weight must be greater than 0. */
++ sch_cfg.drr_node_wt = 1;
++ /* Byte based scheduling. */
++ sch_cfg.unit_is_packet = false;
++ /* Frame + CRC calculated. */
++ sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
++
++ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
++ config.flow_level,
++ config.port,
++ sch_cfg);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++/* Initialize scheduler settings for PPE buffer utilization and for
++ * dispatching packets on the PPE queues.
++ */
++static int ppe_config_scheduler(struct ppe_device *ppe_dev)
++{
++ const struct ppe_scheduler_port_config *port_cfg;
++ const struct ppe_scheduler_qm_config *qm_cfg;
++ const struct ppe_scheduler_bm_config *bm_cfg;
++ int ret, i, count;
++ u32 val, reg;
++
++ count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
++ bm_cfg = ipq9574_ppe_sch_bm_config;
++
++ /* Configure the depth of BM scheduler entries. */
++ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
++ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
++ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
++
++ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
++ if (ret)
++ goto sch_config_fail;
++
++ /* Configure each BM scheduler entry with the valid ingress port and
++ * egress port; the second port takes effect when the specified port
++ * is in the inactive state.
++ */
++ for (i = 0; i < count; i++) {
++ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
++ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].is_egress);
++ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
++ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID, bm_cfg[i].second_valid);
++ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT, bm_cfg[i].second_port);
++
++ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ goto sch_config_fail;
++ }
++
++ count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
++ qm_cfg = ipq9574_ppe_sch_qm_config;
++
++ /* Configure the depth of QM scheduler entries. */
++ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
++ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
++ if (ret)
++ goto sch_config_fail;
++
++ /* Configure each QM scheduler entry with the enqueue port and dequeue
++ * port; the second port takes effect when the specified dequeue
++ * port is in the inactive state.
++ */
++ for (i = 0; i < count; i++) {
++ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
++ qm_cfg[i].ensch_port_bmp);
++ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
++ qm_cfg[i].ensch_port);
++ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
++ qm_cfg[i].desch_port);
++ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
++ qm_cfg[i].desch_second_valid);
++ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
++ qm_cfg[i].desch_second_port);
++
++ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ goto sch_config_fail;
++ }
++
++ count = ARRAY_SIZE(ppe_port_sch_config);
++ port_cfg = ppe_port_sch_config;
++
++ /* Configure scheduler per PPE queue or flow. */
++ for (i = 0; i < count; i++) {
++ if (port_cfg[i].port >= ppe_dev->num_ports)
++ break;
++
++ ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
++ if (ret)
++ goto sch_config_fail;
++ }
++
++ return 0;
++
++sch_config_fail:
++ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
++ return ret;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -366,5 +1148,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_config_qm(ppe_dev);
++ ret = ppe_config_qm(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_config_scheduler(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -8,5 +8,42 @@
+
+ #include "ppe.h"
+
++/**
++ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
++ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
++ * preamble, Ethernet packet and CRC.
++ * @PPE_SCH_WITH_FRAME_CRC: The scheduled frame includes the Ethernet frame
++ * and CRC, excluding IPG and preamble.
++ * @PPE_SCH_WITH_L3_PAYLOAD: The scheduled frame includes layer 3 packet data.
++ */
++enum ppe_scheduler_frame_mode {
++ PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC = 0,
++ PPE_SCH_WITH_FRAME_CRC = 1,
++ PPE_SCH_WITH_L3_PAYLOAD = 2,
++};
++
++/**
++ * struct ppe_scheduler_cfg - PPE scheduler configuration.
++ * @flow_id: PPE flow ID.
++ * @pri: Scheduler priority.
++ * @drr_node_id: Node ID for scheduled traffic.
++ * @drr_node_wt: Weight for scheduled traffic.
++ * @unit_is_packet: Packet based or byte based unit for scheduled traffic.
++ * @frame_mode: Packet mode to be scheduled.
++ *
++ * PPE scheduler supports commit rate and exceed rate configurations.
++ */
++struct ppe_scheduler_cfg {
++ int flow_id;
++ int pri;
++ int drr_node_id;
++ int drr_node_wt;
++ bool unit_is_packet;
++ enum ppe_scheduler_frame_mode frame_mode;
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
++int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
++ int node_id, bool flow_level, int port,
++ struct ppe_scheduler_cfg scheduler_cfg);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -9,16 +9,113 @@
+
+ #include <linux/bitfield.h>
+
++/* PPE scheduler configurations for buffer manager block. */
++#define PPE_BM_SCH_CTRL_ADDR 0xb000
++#define PPE_BM_SCH_CTRL_INC 4
++#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
++#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
++#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
++
++#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
++#define PPE_BM_SCH_CFG_TBL_ENTRIES 128
++#define PPE_BM_SCH_CFG_TBL_INC 0x10
++#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
++#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
++#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
++#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
++#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
++
+ /* PPE queue counters enable/disable control. */
+ #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
++/* Port scheduler global config. */
++#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
++#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
++#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
++
++/* PPE queue level scheduler configurations. */
++#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
++#define PPE_L0_FLOW_MAP_TBL_ENTRIES 300
++#define PPE_L0_FLOW_MAP_TBL_INC 0x10
++#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
++#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
++#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
++#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
++#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
++
++#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
++#define PPE_L0_C_FLOW_CFG_TBL_ENTRIES 512
++#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
++#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
++#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
++
++#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
++#define PPE_L0_E_FLOW_CFG_TBL_ENTRIES 512
++#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
++#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
++#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
++
++#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
++#define PPE_L0_FLOW_PORT_MAP_TBL_ENTRIES 300
++#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
++#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
++
++#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
++#define PPE_L0_COMP_CFG_TBL_ENTRIES 300
++#define PPE_L0_COMP_CFG_TBL_INC 0x10
++#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
++#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
++
+ /* Table addresses for per-queue dequeue setting. */
+ #define PPE_DEQ_OPR_TBL_ADDR 0x430000
+ #define PPE_DEQ_OPR_TBL_ENTRIES 300
+ #define PPE_DEQ_OPR_TBL_INC 0x10
+ #define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
+
++/* PPE flow level scheduler configurations. */
++#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
++#define PPE_L1_FLOW_MAP_TBL_ENTRIES 64
++#define PPE_L1_FLOW_MAP_TBL_INC 0x10
++#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
++#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
++#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
++#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
++#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
++
++#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
++#define PPE_L1_C_FLOW_CFG_TBL_ENTRIES 64
++#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
++#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
++#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
++
++#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
++#define PPE_L1_E_FLOW_CFG_TBL_ENTRIES 64
++#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
++#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
++#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
++
++#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
++#define PPE_L1_FLOW_PORT_MAP_TBL_ENTRIES 64
++#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
++#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
++
++#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
++#define PPE_L1_COMP_CFG_TBL_ENTRIES 64
++#define PPE_L1_COMP_CFG_TBL_INC 0x10
++#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
++#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
++
++/* PPE port scheduler configurations for egress. */
++#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
++#define PPE_PSCH_SCH_CFG_TBL_ENTRIES 128
++#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
++#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
++#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
++#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
++#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
++#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
++
+ /* There are 15 BM ports and 4 BM groups supported by PPE.
+ * BM port (0-7) is for EDMA port 0, BM port (8-13) is for
+ * PPE physical port 1-6 and BM port 14 is for EIP port.
--- /dev/null
+From 63874f7c2e46f192e43e6214d66236372e36396c Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:41 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE queue settings
+
+Configure unicast and multicast hardware queues for the PPE
+ports to enable packet forwarding between the ports.
+
+Each PPE port is assigned a range of queues. The queue ID selected
+for a packet is decided by the queue base and the queue offset, which
+are configured based on the internal priority and the RSS hash value
+of the packet.
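+
+As a sketch (the names below are descriptive, not the actual table or
+register names), the resolution is roughly:
+
+    base   = ucast_queue_map[profile][dest];    /* per-destination base */
+    offset = use_hash ? hash_map[profile][rss_hash]
+                      : pri_map[profile][priority];
+    queue  = base + offset;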
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 356 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 63 ++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 21 ++
+ 3 files changed, 439 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -128,6 +128,34 @@ struct ppe_scheduler_port_config {
+ unsigned int drr_node_id;
+ };
+
++/**
++ * struct ppe_port_schedule_resource - PPE port scheduler resource.
++ * @ucastq_start: Unicast queue start ID.
++ * @ucastq_end: Unicast queue end ID.
++ * @mcastq_start: Multicast queue start ID.
++ * @mcastq_end: Multicast queue end ID.
++ * @flow_id_start: Flow start ID.
++ * @flow_id_end: Flow end ID.
++ * @l0node_start: Scheduler node start ID for queue level.
++ * @l0node_end: Scheduler node end ID for queue level.
++ * @l1node_start: Scheduler node start ID for flow level.
++ * @l1node_end: Scheduler node end ID for flow level.
++ *
++ * PPE scheduler resource allocated among the PPE ports.
++ */
++struct ppe_port_schedule_resource {
++ unsigned int ucastq_start;
++ unsigned int ucastq_end;
++ unsigned int mcastq_start;
++ unsigned int mcastq_end;
++ unsigned int flow_id_start;
++ unsigned int flow_id_end;
++ unsigned int l0node_start;
++ unsigned int l0node_end;
++ unsigned int l1node_start;
++ unsigned int l1node_end;
++};
++
+ /* Assign the shared buffer number 1550 to group 0 by default. */
+ static const int ipq9574_ppe_bm_group_config = 1550;
+
+@@ -676,6 +704,111 @@ static const struct ppe_scheduler_port_c
+ },
+ };
+
++/* The scheduler resource is allocated to each PPE port. The resource
++ * includes the unicast and multicast queues, flow nodes and DRR nodes.
++ */
++static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
++ { .ucastq_start = 0,
++ .ucastq_end = 63,
++ .mcastq_start = 256,
++ .mcastq_end = 271,
++ .flow_id_start = 0,
++ .flow_id_end = 0,
++ .l0node_start = 0,
++ .l0node_end = 7,
++ .l1node_start = 0,
++ .l1node_end = 0,
++ },
++ { .ucastq_start = 144,
++ .ucastq_end = 159,
++ .mcastq_start = 272,
++ .mcastq_end = 275,
++ .flow_id_start = 36,
++ .flow_id_end = 39,
++ .l0node_start = 48,
++ .l0node_end = 63,
++ .l1node_start = 8,
++ .l1node_end = 11,
++ },
++ { .ucastq_start = 160,
++ .ucastq_end = 175,
++ .mcastq_start = 276,
++ .mcastq_end = 279,
++ .flow_id_start = 40,
++ .flow_id_end = 43,
++ .l0node_start = 64,
++ .l0node_end = 79,
++ .l1node_start = 12,
++ .l1node_end = 15,
++ },
++ { .ucastq_start = 176,
++ .ucastq_end = 191,
++ .mcastq_start = 280,
++ .mcastq_end = 283,
++ .flow_id_start = 44,
++ .flow_id_end = 47,
++ .l0node_start = 80,
++ .l0node_end = 95,
++ .l1node_start = 16,
++ .l1node_end = 19,
++ },
++ { .ucastq_start = 192,
++ .ucastq_end = 207,
++ .mcastq_start = 284,
++ .mcastq_end = 287,
++ .flow_id_start = 48,
++ .flow_id_end = 51,
++ .l0node_start = 96,
++ .l0node_end = 111,
++ .l1node_start = 20,
++ .l1node_end = 23,
++ },
++ { .ucastq_start = 208,
++ .ucastq_end = 223,
++ .mcastq_start = 288,
++ .mcastq_end = 291,
++ .flow_id_start = 52,
++ .flow_id_end = 55,
++ .l0node_start = 112,
++ .l0node_end = 127,
++ .l1node_start = 24,
++ .l1node_end = 27,
++ },
++ { .ucastq_start = 224,
++ .ucastq_end = 239,
++ .mcastq_start = 292,
++ .mcastq_end = 295,
++ .flow_id_start = 56,
++ .flow_id_end = 59,
++ .l0node_start = 128,
++ .l0node_end = 143,
++ .l1node_start = 28,
++ .l1node_end = 31,
++ },
++ { .ucastq_start = 240,
++ .ucastq_end = 255,
++ .mcastq_start = 296,
++ .mcastq_end = 299,
++ .flow_id_start = 60,
++ .flow_id_end = 63,
++ .l0node_start = 144,
++ .l0node_end = 159,
++ .l1node_start = 32,
++ .l1node_end = 35,
++ },
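++ /* The last entry is the reserved resource, acquired by passing the
++ * maximum PPE port ID to ppe_port_resource_get().
++ */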
++ { .ucastq_start = 64,
++ .ucastq_end = 143,
++ .mcastq_start = 0,
++ .mcastq_end = 0,
++ .flow_id_start = 1,
++ .flow_id_end = 35,
++ .l0node_start = 8,
++ .l0node_end = 47,
++ .l1node_start = 1,
++ .l1node_end = 7,
++ },
++};
++
+ /* Set the PPE queue level scheduler configuration. */
+ static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+@@ -807,6 +940,149 @@ int ppe_queue_scheduler_set(struct ppe_d
+ port, scheduler_cfg);
+ }
+
++/**
++ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
++ * @ppe_dev: PPE device
++ * @queue_dst: PPE queue destination configuration
++ * @queue_base: PPE queue base ID
++ * @profile_id: Profile ID
++ *
++ * The PPE unicast queue base ID and profile ID are configured based on the
++ * destination information, which can be the service code, the CPU code or
++ * the destination port.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
++ struct ppe_queue_ucast_dest queue_dst,
++ int queue_base, int profile_id)
++{
++ int index, profile_size;
++ u32 val, reg;
++
++ profile_size = queue_dst.src_profile << 8;
++ if (queue_dst.service_code_en)
++ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
++ queue_dst.service_code;
++ else if (queue_dst.cpu_code_en)
++ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
++ queue_dst.cpu_code;
++ else
++ index = profile_size + queue_dst.dest_port;
++
++ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
++ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
++ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
++ * @ppe_dev: PPE device
++ * @profile_id: Profile ID
++ * @priority: PPE internal priority to be used to set queue offset
++ * @queue_offset: Queue offset used for calculating the destination queue ID
++ *
++ * The PPE unicast queue offset is configured based on the PPE
++ * internal priority.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
++ int profile_id,
++ int priority,
++ int queue_offset)
++{
++ u32 val, reg;
++ int index;
++
++ index = (profile_id << 4) + priority;
++ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
++ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
++ * @ppe_dev: PPE device
++ * @profile_id: Profile ID
++ * @rss_hash: Packet hash value to be used to set queue offset
++ * @queue_offset: Queue offset used for calculating the destination queue ID
++ *
++ * The PPE unicast queue offset is configured based on the RSS hash value.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
++ int profile_id,
++ int rss_hash,
++ int queue_offset)
++{
++ u32 val, reg;
++ int index;
++
++ index = (profile_id << 8) + rss_hash;
++ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
++ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_port_resource_get - Get PPE resource per port
++ * @ppe_dev: PPE device
++ * @port: PPE port
++ * @type: Resource type
++ * @res_start: Resource start ID returned
++ * @res_end: Resource end ID returned
++ *
++ * The PPE resource is assigned per PPE port, and is acquired for the QoS scheduler.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
++ enum ppe_resource_type type,
++ int *res_start, int *res_end)
++{
++ struct ppe_port_schedule_resource res;
++
++ /* The reserved resource, indexed by the maximum PPE port ID,
++ * is also allowed to be acquired.
++ */
++ if (port > ppe_dev->num_ports)
++ return -EINVAL;
++
++ res = ppe_scheduler_res[port];
++ switch (type) {
++ case PPE_RES_UCAST:
++ *res_start = res.ucastq_start;
++ *res_end = res.ucastq_end;
++ break;
++ case PPE_RES_MCAST:
++ *res_start = res.mcastq_start;
++ *res_end = res.mcastq_end;
++ break;
++ case PPE_RES_FLOW_ID:
++ *res_start = res.flow_id_start;
++ *res_end = res.flow_id_end;
++ break;
++ case PPE_RES_L0_NODE:
++ *res_start = res.l0node_start;
++ *res_end = res.l0node_end;
++ break;
++ case PPE_RES_L1_NODE:
++ *res_start = res.l1node_start;
++ *res_end = res.l1node_end;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -1140,6 +1416,80 @@ sch_config_fail:
+ return ret;
+ };
+
++/* Configure PPE queue destination of each PPE port. */
++static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
++{
++ int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
++ struct ppe_queue_ucast_dest queue_dst;
++
++ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
++ memset(&queue_dst, 0, sizeof(queue_dst));
++
++ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
++ &res_start, &res_end);
++ if (ret)
++ return ret;
++
++ q_base = res_start;
++ queue_dst.dest_port = port_id;
++
++ /* Configure the queue base ID, using the physical port ID as
++ * the profile ID.
++ */
++ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
++ q_base, port_id);
++ if (ret)
++ return ret;
++
++ /* Queue priority range supported by each PPE port */
++ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
++ &res_start, &res_end);
++ if (ret)
++ return ret;
++
++ pri_max = res_end - res_start;
++
++ /* Redirect ARP reply packets to the CPU port with the maximum
++ * priority, which keeps ARP replies (CPU code 101) directed to
++ * the highest priority EDMA queue.
++ */
++ if (port_id == 0) {
++ memset(&queue_dst, 0, sizeof(queue_dst));
++
++ queue_dst.cpu_code_en = true;
++ queue_dst.cpu_code = 101;
++ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
++ q_base + pri_max,
++ 0);
++ if (ret)
++ return ret;
++ }
++
++ /* Initialize the queue offset of internal priority. */
++ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
++ q_offset = index > pri_max ? pri_max : index;
++
++ ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
++ index, q_offset);
++ if (ret)
++ return ret;
++ }
++
++ /* Initialize the queue offset of the RSS hash to 0, to avoid a
++ * random hardware value leading to an unexpected destination
++ * queue.
++ */
++ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
++ ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
++ index, 0);
++ if (ret)
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1152,5 +1502,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_config_scheduler(ppe_dev);
++ ret = ppe_config_scheduler(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_queue_dest_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -8,6 +8,16 @@
+
+ #include "ppe.h"
+
++/* There are different table index ranges for configuring the queue base ID
++ * based on the destination port, CPU code or service code.
++ */
++#define PPE_QUEUE_BASE_DEST_PORT 0
++#define PPE_QUEUE_BASE_CPU_CODE 1024
++#define PPE_QUEUE_BASE_SERVICE_CODE 2048
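++
++/* For example (see ppe_queue_ucast_base_set()), CPU code 101 with source
++ * profile 0 maps to the table index
++ * PPE_QUEUE_BASE_CPU_CODE + (0 << 8) + 101.
++ */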
++
++#define PPE_QUEUE_INTER_PRI_NUM 16
++#define PPE_QUEUE_HASH_NUM 256
++
+ /**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+@@ -42,8 +52,61 @@ struct ppe_scheduler_cfg {
+ enum ppe_scheduler_frame_mode frame_mode;
+ };
+
++/**
++ * enum ppe_resource_type - PPE resource type.
++ * @PPE_RES_UCAST: Unicast queue resource.
++ * @PPE_RES_MCAST: Multicast queue resource.
++ * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
++ * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
++ * @PPE_RES_FLOW_ID: Flow based node resource.
++ */
++enum ppe_resource_type {
++ PPE_RES_UCAST,
++ PPE_RES_MCAST,
++ PPE_RES_L0_NODE,
++ PPE_RES_L1_NODE,
++ PPE_RES_FLOW_ID,
++};
++
++/**
++ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
++ * @src_profile: Source profile.
++ * @service_code_en: Enable service code to map the queue base ID.
++ * @service_code: Service code.
++ * @cpu_code_en: Enable CPU code to map the queue base ID.
++ * @cpu_code: CPU code.
++ * @dest_port: Destination port.
++ *
++ * The PPE egress queue ID is decided by the service code if enabled,
++ * otherwise by the CPU code if enabled, or by the destination port if
++ * both the service code and CPU code are disabled.
++ */
++struct ppe_queue_ucast_dest {
++ int src_profile;
++ bool service_code_en;
++ int service_code;
++ bool cpu_code_en;
++ int cpu_code;
++ int dest_port;
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg);
++int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
++ struct ppe_queue_ucast_dest queue_dst,
++ int queue_base,
++ int profile_id);
++int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
++ int profile_id,
++ int priority,
++ int queue_offset);
++int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
++ int profile_id,
++ int rss_hash,
++ int queue_offset);
++int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
++ enum ppe_resource_type type,
++ int *res_start, int *res_end);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -164,6 +164,27 @@
+ #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
+
++/* The queue base configurations based on destination port,
++ * service code or CPU code.
++ */
++#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
++#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
++#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
++#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
++#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
++
++/* The queue offset configurations based on RSS hash value. */
++#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
++#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
++#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
++#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
++
++/* The queue offset configurations based on PPE internal priority. */
++#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
++#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
++#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
++#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
++
+ /* PPE unicast queue (0-255) configurations. */
+ #define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
+ #define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
--- /dev/null
+From 4147ce0d95816bded5c5e6cb276b1aa9f2620045 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:42 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE service code settings
+
+The PPE service code is a special code (0-255) defined by the PPE for
+its packet processing stages, as per the network functions required
+for the packet.
+
+For packets sent out by the ARM cores on Ethernet ports, service code
+1 is used as the default service code. This service code bypasses most
+of the packet processing stages of the PPE before the packet is
+transmitted out of the PPE port, since the software network stack has
+already processed the packet.
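+
+As an example of the API added here (a sketch; service code 2 is a
+hypothetical value), a caller bypasses the ingress flow lookup with:
+
+    struct ppe_sc_cfg cfg = {};
+
+    set_bit(PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP, cfg.bitmaps.ingress);
+    ppe_sc_config_set(ppe_dev, 2, cfg);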
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 95 +++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 145 ++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 53 +++++++
+ 3 files changed, 292 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/bitmap.h>
+ #include <linux/bits.h>
+ #include <linux/device.h>
+ #include <linux/regmap.h>
+@@ -1083,6 +1084,75 @@ int ppe_port_resource_get(struct ppe_dev
+ return 0;
+ }
+
++/**
++ * ppe_sc_config_set - Set PPE service code configuration
++ * @ppe_dev: PPE device
++ * @sc: Service ID, 0-255 supported by PPE
++ * @cfg: Service code configuration
++ *
++ * The PPE service code is used during the packet processing stages of the
++ * PPE, to perform or bypass selected operations on the packet.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
++{
++ u32 val, reg, servcode_val[2] = {};
++ unsigned long bitmap_value;
++ int ret;
++
++ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
++ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
++ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
++
++ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
++ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
++ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
++ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
++ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
++ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
++ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++
++ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
++ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
++ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
++ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
++ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ servcode_val, ARRAY_SIZE(servcode_val));
++ if (ret)
++ return ret;
++
++ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ servcode_val, ARRAY_SIZE(servcode_val));
++ if (ret)
++ return ret;
++
++ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
++ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
++ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
++ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
++ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
++ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ servcode_val, ARRAY_SIZE(servcode_val));
++ if (ret)
++ return ret;
++
++ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
++ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
++ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -1490,6 +1560,25 @@ static int ppe_queue_dest_init(struct pp
+ return 0;
+ }
+
++/* Initialize service code 1, which is used by the CPU port. */
++static int ppe_servcode_init(struct ppe_device *ppe_dev)
++{
++ struct ppe_sc_cfg sc_cfg = {};
++
++ bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
++ bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
++
++ bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
++ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
++ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
++ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
++
++ bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
++ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
++
++ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1506,5 +1595,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_queue_dest_init(ppe_dev);
++ ret = ppe_queue_dest_init(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_servcode_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -6,6 +6,8 @@
+ #ifndef __PPE_CONFIG_H__
+ #define __PPE_CONFIG_H__
+
++#include <linux/types.h>
++
+ #include "ppe.h"
+
+ /* There are different table index ranges for configuring queue base ID of
+@@ -18,6 +20,9 @@
+ #define PPE_QUEUE_INTER_PRI_NUM 16
+ #define PPE_QUEUE_HASH_NUM 256
+
++/* The service code used by the EDMA port to transmit packets to the PPE. */
++#define PPE_EDMA_SC_BYPASS_ID 1
++
+ /**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+@@ -90,6 +95,144 @@ struct ppe_queue_ucast_dest {
+ int dest_port;
+ };
+
++/* Hardware bitmaps for bypassing features of the ingress packet. */
++enum ppe_sc_ingress_type {
++ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
++ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
++ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
++ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
++ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
++ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
++ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
++ PPE_SC_BYPASS_INGRESS_ACL = 7,
++ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
++ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
++ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
++ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
++ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
++ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
++ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
++ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
++ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
++ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
++ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
++ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
++ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
++ /* Values 21-23 are not specified by hardware. */
++ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
++ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
++ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
++ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
++ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
++ /* This must be last as it determines the size of the BITMAP. */
++ PPE_SC_BYPASS_INGRESS_SIZE,
++};
++
++/* Hardware bitmaps for bypassing features of the egress packet. */
++enum ppe_sc_egress_type {
++ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
++ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
++ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
++ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
++ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
++ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
++ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
++ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
++ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
++ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
++ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
++ PPE_SC_BYPASS_EGRESS_POLICER = 11,
++ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
++ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
++ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
++ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
++ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
++ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
++ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
++ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
++ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
++ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
++ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
++ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
++ /* This must be last as it determines the size of the BITMAP. */
++ PPE_SC_BYPASS_EGRESS_SIZE,
++};
++
++/* Hardware bitmaps for bypassing packet counters. */
++enum ppe_sc_counter_type {
++ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
++ PPE_SC_BYPASS_COUNTER_RX = 1,
++ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
++ PPE_SC_BYPASS_COUNTER_TX = 3,
++ /* This must be last as it determines the size of the BITMAP. */
++ PPE_SC_BYPASS_COUNTER_SIZE,
++};
++
++/* Hardware bitmaps for bypassing features of the tunnel packet. */
++enum ppe_sc_tunnel_type {
++ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
++ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
++ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
++ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
++ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
++ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
++ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
++ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
++ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
++ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
++ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
++ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
++ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
++ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
++ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
++ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
++ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
++ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
++ /* Values 18-19 are not specified by hardware. */
++ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
++ /* This must be last as it determines the size of the BITMAP. */
++ PPE_SC_BYPASS_TUNNEL_SIZE,
++};
++
++/**
++ * struct ppe_sc_bypass - PPE service bypass bitmaps
++ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
++ * @egress: Bitmap of features that can be bypassed on the egress packet.
++ * @counter: Bitmap of features that can be bypassed on the counter type.
++ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
++ */
++struct ppe_sc_bypass {
++ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
++ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
++ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
++ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
++};
++
++/**
++ * struct ppe_sc_cfg - PPE service code configuration.
++ * @dest_port_valid: Generate destination port or not.
++ * @dest_port: Destination port ID.
++ * @bitmaps: Bitmap of bypass features.
++ * @is_src: Destination port acts as source port, packet sent to CPU.
++ * @next_service_code: New service code generated.
++ * @eip_field_update_bitmap: Fields updated as actions taken for EIP.
++ * @eip_hw_service: Selected hardware functions for EIP.
++ * @eip_offset_sel: Packet offset selection, using packet's layer 4 offset
++ * or using packet's layer 3 offset for EIP.
++ *
++ * The service code is generated as the packet passes through the PPE.
++ */
++struct ppe_sc_cfg {
++ bool dest_port_valid;
++ int dest_port;
++ struct ppe_sc_bypass bitmaps;
++ bool is_src;
++ int next_service_code;
++ int eip_field_update_bitmap;
++ int eip_hw_service;
++ int eip_offset_sel;
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+@@ -109,4 +252,6 @@ int ppe_queue_ucast_offset_hash_set(stru
+ int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end);
++int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
++ struct ppe_sc_cfg cfg);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -25,10 +25,63 @@
+ #define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
+ #define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
+
++/* PPE service code configuration for the ingress direction functions,
++ * including bypass configuration for relevant PPE switch core functions
++ * such as flow entry lookup bypass.
++ */
++#define PPE_SERVICE_TBL_ADDR 0x15000
++#define PPE_SERVICE_TBL_ENTRIES 256
++#define PPE_SERVICE_TBL_INC 0x10
++#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
++#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
++
++#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_SERVICE_W0_BYPASS_BITMAP)
++#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
++
+ /* PPE queue counters enable/disable control. */
+ #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
++/* PPE service code configuration on the egress direction. */
++#define PPE_EG_SERVICE_TBL_ADDR 0x43000
++#define PPE_EG_SERVICE_TBL_ENTRIES 256
++#define PPE_EG_SERVICE_TBL_INC 0x10
++#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
++#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
++#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
++#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
++#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
++
++#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_EG_SERVICE_W0_UPDATE_ACTION)
++#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_NEXT_SERVCODE)
++#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_HW_SERVICE)
++#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_OFFSET_SEL)
++#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
++
++/* PPE service code configuration for destination port and counter. */
++#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
++#define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
++#define PPE_IN_L2_SERVICE_TBL_INC 0x10
++#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
++#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
++#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
++#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
++#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
++#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
++
++/* PPE service code configuration for the tunnel packet. */
++#define PPE_TL_SERVICE_TBL_ADDR 0x306000
++#define PPE_TL_SERVICE_TBL_ENTRIES 256
++#define PPE_TL_SERVICE_TBL_INC 4
++#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
++
+ /* Port scheduler global config. */
+ #define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
+ #define PPE_PSCH_SCH_DEPTH_CFG_INC 4
--- /dev/null
+From 63af46200da794acda25cf8083bde0c1576b0859 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:43 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE port control settings
+
+1. Enable port-specific counters in the PPE.
+2. Configure the default action as drop when the packet size
+   is more than the configured MTU of the physical port.
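+
+The intended effect of item 2, as a conceptual sketch rather than
+literal driver code:
+
+    if (packet_size > port_mtu)
+        action = PPE_ACTION_DROP;    /* instead of forwarding */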
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 86 ++++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 15 ++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 47 ++++++++++
+ 3 files changed, 147 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1153,6 +1153,44 @@ int ppe_sc_config_set(struct ppe_device
+ return regmap_write(ppe_dev->regmap, reg, val);
+ }
+
++/**
++ * ppe_counter_enable_set - Enable PPE port counters
++ * @ppe_dev: PPE device
++ * @port: PPE port ID
++ *
++ * Enable PPE counters on the given port for the unicast packet, multicast
++ * packet and VLAN packet received and transmitted by PPE.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
++{
++ u32 reg, mru_mtu_val[3];
++ int ret;
++
++ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++ if (ret)
++ return ret;
++
++ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
++ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++ if (ret)
++ return ret;
++
++ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
++ ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
++ if (ret)
++ return ret;
++
++ reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
++
++ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -1579,6 +1617,48 @@ static int ppe_servcode_init(struct ppe_
+ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
+ }
+
++/* Initialize PPE port configurations. */
++static int ppe_port_config_init(struct ppe_device *ppe_dev)
++{
++ u32 reg, val, mru_mtu_val[3];
++ int i, ret;
++
++ /* MTU and MRU settings are not required for CPU port 0. */
++ for (i = 1; i < ppe_dev->num_ports; i++) {
++ /* Enable Ethernet port counter */
++ ret = ppe_counter_enable_set(ppe_dev, i);
++ if (ret)
++ return ret;
++
++ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++ if (ret)
++ return ret;
++
++ /* Drop the packet when the packet size exceeds the MTU
++ * or MRU of the physical interface.
++ */
++ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_DROP);
++ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++ if (ret)
++ return ret;
++
++ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
++ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
++ ret = regmap_update_bits(ppe_dev->regmap, reg,
++ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
++ val);
++ if (ret)
++ return ret;
++ }
++
++ /* Enable CPU port counters. */
++ return ppe_counter_enable_set(ppe_dev, 0);
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1599,5 +1679,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_servcode_init(ppe_dev);
++ ret = ppe_servcode_init(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_port_config_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -233,6 +233,20 @@ struct ppe_sc_cfg {
+ int eip_offset_sel;
+ };
+
++/**
++ * enum ppe_action_type - PPE action of the received packet.
++ * @PPE_ACTION_FORWARD: Packet forwarded per L2/L3 process.
++ * @PPE_ACTION_DROP: Packet dropped by PPE.
++ * @PPE_ACTION_COPY_TO_CPU: Packet copied to CPU port per multicast queue.
++ * @PPE_ACTION_REDIRECT_TO_CPU: Packet redirected to CPU port per unicast queue.
++ */
++enum ppe_action_type {
++ PPE_ACTION_FORWARD = 0,
++ PPE_ACTION_DROP = 1,
++ PPE_ACTION_COPY_TO_CPU = 2,
++ PPE_ACTION_REDIRECT_TO_CPU = 3,
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+@@ -254,4 +268,5 @@ int ppe_port_resource_get(struct ppe_dev
+ int *res_start, int *res_end);
+ int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
++int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -40,6 +40,18 @@
+ #define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
+
++/* PPE port egress VLAN configurations. */
++#define PPE_PORT_EG_VLAN_TBL_ADDR 0x20020
++#define PPE_PORT_EG_VLAN_TBL_ENTRIES 8
++#define PPE_PORT_EG_VLAN_TBL_INC 4
++#define PPE_PORT_EG_VLAN_TBL_VLAN_TYPE BIT(0)
++#define PPE_PORT_EG_VLAN_TBL_CTAG_MODE GENMASK(2, 1)
++#define PPE_PORT_EG_VLAN_TBL_STAG_MODE GENMASK(4, 3)
++#define PPE_PORT_EG_VLAN_TBL_VSI_TAG_MODE_EN BIT(5)
++#define PPE_PORT_EG_VLAN_TBL_PCP_PROP_CMD BIT(6)
++#define PPE_PORT_EG_VLAN_TBL_DEI_PROP_CMD BIT(7)
++#define PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN BIT(8)
++
+ /* PPE queue counters enable/disable control. */
+ #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+@@ -65,6 +77,41 @@
+ #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
+
++/* PPE port control configurations for the traffic to the multicast queues. */
++#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
++#define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
++#define PPE_MC_MTU_CTRL_TBL_INC 4
++#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
++#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
++#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
++
++/* PPE port control configurations for the traffic to the unicast queues. */
++#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
++#define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
++#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
++#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
++#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
++#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
++#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
++#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
++#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
++#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
++#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
++#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
++
++#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU)
++#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU_CMD)
++#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU)
++#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU_CMD)
++#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_RX_CNT_EN)
++#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_TX_CNT_EN)
++
+ /* PPE service code configuration for destination port and counter. */
+ #define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
+ #define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
--- /dev/null
+From 796be78fffeebe77237a6464da7ebe9807d670f0 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:44 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE RSS hash settings
+
+The PPE RSS hash is generated on packet receive, based on the packet
+content (3-tuple or 5-tuple) and the configured RSS seed. The hash is
+then used to select the queue through which the packet is delivered
+to the ARM CPU.
+
+Initialize the RSS hash settings that are used to generate the hash
+for the packet on the PPE receive path.
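+
+A hedged sketch of programming an IPv4-only RSS configuration with the
+new API (the field values are illustrative, not the defaults chosen by
+this patch; the mix and fin fields left out simply stay zero):
+
+	struct ppe_rss_hash_cfg cfg = {
+		.hash_seed = get_random_u32(),
+		.hash_mask = 0xfff,		/* 12-bit hash output */
+		.hash_fragment_mode = false,	/* 5-tuple for first fragment */
+	};
+	int ret;
+
+	ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, cfg);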
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 194 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 39 ++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 40 ++++
+ 3 files changed, 272 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1191,6 +1191,143 @@ int ppe_counter_enable_set(struct ppe_de
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
+ }
+
++static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
++ struct ppe_rss_hash_cfg cfg)
++{
++ u32 reg, val;
++
++ switch (index) {
++ case 0:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sip_mix[0]);
++ break;
++ case 1:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dip_mix[0]);
++ break;
++ case 2:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_protocol_mix);
++ break;
++ case 3:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dport_mix);
++ break;
++ case 4:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sport_mix);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
++ struct ppe_rss_hash_cfg cfg)
++{
++ u32 reg, val;
++
++ switch (index) {
++ case 0 ... 3:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sip_mix[index]);
++ break;
++ case 4 ... 7:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dip_mix[index - 4]);
++ break;
++ case 8:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_protocol_mix);
++ break;
++ case 9:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dport_mix);
++ break;
++ case 10:
++ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sport_mix);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
++
++ return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_rss_hash_config_set - Configure the PPE RSS hash settings
++ * @ppe_dev: PPE device.
++ * @mode: RSS hash mode, PPE_RSS_HASH_MODE_IPV4 and/or PPE_RSS_HASH_MODE_IPV6.
++ * @cfg: RSS hash configuration.
++ *
++ * Configure the PPE RSS hash settings for IPv4 and/or IPv6 packets.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
++ struct ppe_rss_hash_cfg cfg)
++{
++ u32 val, reg;
++ int i, ret;
++
++ if (mode & PPE_RSS_HASH_MODE_IPV4) {
++ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
++ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
++ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
++ if (ret)
++ return ret;
++
++ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
++ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
++ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
++ if (ret)
++ return ret;
++ }
++
++ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
++ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
++ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
++ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++ }
++ }
++
++ if (mode & PPE_RSS_HASH_MODE_IPV6) {
++ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
++ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
++ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
++ if (ret)
++ return ret;
++
++ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
++ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
++ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
++ if (ret)
++ return ret;
++ }
++
++ for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
++ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
++ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
++ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
++
++ ret = regmap_write(ppe_dev->regmap, reg, val);
++ if (ret)
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -1659,6 +1796,57 @@ static int ppe_port_config_init(struct p
+ return ppe_counter_enable_set(ppe_dev, 0);
+ }
+
++/* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
++ * The RSS settings are used to calculate the RSS hash value generated on
++ * packet receive. This hash is then used to generate the queue offset,
++ * which determines the queue used to deliver the packet to the ARM CPU.
++ */
++static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
++{
++ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
++ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
++ struct ppe_rss_hash_cfg hash_cfg;
++ int i, ret;
++
++ hash_cfg.hash_seed = get_random_u32();
++ hash_cfg.hash_mask = 0xfff;
++
++ /* Use the 5-tuple as the RSS hash key for the first fragment of
++ * TCP, UDP and UDP-Lite packets.
++ */
++ hash_cfg.hash_fragment_mode = false;
++
++ /* The final common seed configs used to calculate the RSS hash
++ * value, which are available for both IPv4 and IPv6 packets.
++ */
++ for (i = 0; i < ARRAY_SIZE(fins); i++) {
++ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
++ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
++ }
++
++ /* RSS seeds for IP protocol, L4 destination & source port and
++ * destination & source IP used to calculate the RSS hash value.
++ */
++ hash_cfg.hash_protocol_mix = 0x13;
++ hash_cfg.hash_dport_mix = 0xb;
++ hash_cfg.hash_sport_mix = 0x13;
++ hash_cfg.hash_dip_mix[0] = 0xb;
++ hash_cfg.hash_sip_mix[0] = 0x13;
++
++ /* Configure RSS seed configs for IPv4 packet. */
++ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < ARRAY_SIZE(ips); i++) {
++ hash_cfg.hash_sip_mix[i] = ips[i];
++ hash_cfg.hash_dip_mix[i] = ips[i];
++ }
++
++ /* Configure RSS seed configs for IPv6 packet. */
++ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1683,5 +1871,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_port_config_init(ppe_dev);
++ ret = ppe_port_config_init(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_rss_hash_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -23,6 +23,12 @@
+ /* The service code is used by EDMA port to transmit packet to PPE. */
+ #define PPE_EDMA_SC_BYPASS_ID 1
+
++/* The PPE RSS hash configured for IPv4 and IPv6 packet separately. */
++#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
++#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
++#define PPE_RSS_HASH_IP_LENGTH 4
++#define PPE_RSS_HASH_TUPLES 5
++
+ /**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+@@ -247,6 +253,37 @@ enum ppe_action_type {
+ PPE_ACTION_REDIRECT_TO_CPU = 3,
+ };
+
++/**
++ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
++ * @hash_mask: Mask of the generated hash value.
++ * @hash_fragment_mode: Hash generation mode for the first fragment of TCP,
++ * UDP and UDP-Lite packets, to use either the 3-tuple or the 5-tuple for
++ * RSS hash key computation.
++ * @hash_seed: Seed to generate RSS hash.
++ * @hash_sip_mix: Source IP selection.
++ * @hash_dip_mix: Destination IP selection.
++ * @hash_protocol_mix: Protocol selection.
++ * @hash_sport_mix: Source L4 port selection.
++ * @hash_dport_mix: Destination L4 port selection.
++ * @hash_fin_inner: RSS hash value first selection.
++ * @hash_fin_outer: RSS hash value second selection.
++ *
++ * The PPE RSS hash value is generated for the packet based on the
++ * configured RSS hash settings.
++ */
++struct ppe_rss_hash_cfg {
++ u32 hash_mask;
++ bool hash_fragment_mode;
++ u32 hash_seed;
++ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
++ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
++ u8 hash_protocol_mix;
++ u8 hash_sport_mix;
++ u8 hash_dport_mix;
++ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
++ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+@@ -269,4 +306,6 @@ int ppe_port_resource_get(struct ppe_dev
+ int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
+ int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
++int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
++ struct ppe_rss_hash_cfg hash_cfg);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -16,6 +16,46 @@
+ #define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+ #define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
++/* The RSS settings are used to calculate the RSS hash value generated on
++ * packet receive to the ARM cores. This hash is then used to generate the
++ * queue offset, which determines the queue used to transmit the packet to
++ * the ARM cores.
++ */
++#define PPE_RSS_HASH_MASK_ADDR 0xb4318
++#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
++#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
++
++#define PPE_RSS_HASH_SEED_ADDR 0xb431c
++#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
++
++#define PPE_RSS_HASH_MIX_ADDR 0xb4320
++#define PPE_RSS_HASH_MIX_ENTRIES 11
++#define PPE_RSS_HASH_MIX_INC 4
++#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
++
++#define PPE_RSS_HASH_FIN_ADDR 0xb4350
++#define PPE_RSS_HASH_FIN_ENTRIES 5
++#define PPE_RSS_HASH_FIN_INC 4
++#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
++#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
++
++#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
++#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
++#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
++
++#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
++#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
++
++#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
++#define PPE_RSS_HASH_MIX_IPV4_ENTRIES 5
++#define PPE_RSS_HASH_MIX_IPV4_INC 4
++#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
++
++#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
++#define PPE_RSS_HASH_FIN_IPV4_ENTRIES 5
++#define PPE_RSS_HASH_FIN_IPV4_INC 4
++#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
++#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
++
+ #define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
+ #define PPE_BM_SCH_CFG_TBL_ENTRIES 128
+ #define PPE_BM_SCH_CFG_TBL_INC 0x10
--- /dev/null
+From c4a321bc120fabc318df165a7fcdeddfcf052253 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:45 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE queue to Ethernet DMA
+ ring mapping
+
+Map the selected PPE queues to an Ethernet DMA ring, so that packets
+can be received on the ARM cores.
+
+As the default initialization, all queues assigned to CPU port 0 are
+mapped to EDMA ring 0. This configuration is later updated during
+Ethernet DMA initialization.
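+
+For illustration, a minimal sketch of mapping unicast queues 0-31 to
+EDMA ring 0 with the new helper (the queue range is an assumption made
+for this example):
+
+	u32 queue_map[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+	int queue_id, ret;
+
+	/* Each bit in the bitmap words marks one PPE queue. */
+	for (queue_id = 0; queue_id < 32; queue_id++)
+		queue_map[queue_id / 32] |= BIT_MASK(queue_id % 32);
+
+	ret = ppe_ring_queue_map_set(ppe_dev, 0, queue_map);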
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 47 ++++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h | 6 +++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 5 ++
+ 3 files changed, 57 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1328,6 +1328,28 @@ int ppe_rss_hash_config_set(struct ppe_d
+ return 0;
+ }
+
++/**
++ * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
++ * @ppe_dev: PPE device
++ * @ring_id: Ethernet DMA ring ID
++ * @queue_map: Bitmap of the queue IDs mapped to the given Ethernet DMA ring
++ *
++ * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
++{
++ u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
++
++ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
++ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
++
++ return regmap_bulk_write(ppe_dev->regmap, reg,
++ queue_bitmap_val,
++ ARRAY_SIZE(queue_bitmap_val));
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+ {
+@@ -1847,6 +1869,25 @@ static int ppe_rss_hash_init(struct ppe_
+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+ }
+
++/* Initialize the mapping of the PPE queues assigned to CPU port 0
++ * to Ethernet DMA ring 0.
++ */
++static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
++{
++ u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
++ int ret, queue_id, queue_max;
++
++ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
++ &queue_id, &queue_max);
++ if (ret)
++ return ret;
++
++ for (; queue_id <= queue_max; queue_id++)
++ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
++
++ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1875,5 +1916,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_rss_hash_init(ppe_dev);
++ ret = ppe_rss_hash_init(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_queues_to_ring_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -29,6 +29,9 @@
+ #define PPE_RSS_HASH_IP_LENGTH 4
+ #define PPE_RSS_HASH_TUPLES 5
+
++/* PPE supports 300 queues; each bit represents one queue, so ten 32-bit
++ * words cover the whole queue bitmap.
++ */
++#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10
++
+ /**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+@@ -308,4 +311,7 @@ int ppe_sc_config_set(struct ppe_device
+ int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+ int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg hash_cfg);
++int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
++ int ring_id,
++ u32 *queue_map);
+ #endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -207,6 +207,11 @@
+ #define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+ #define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
++/* PPE queue to Ethernet DMA ring mapping table. */
++#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
++#define PPE_RING_Q_MAP_TBL_ENTRIES 24
++#define PPE_RING_Q_MAP_TBL_INC 0x40
++
+ /* Table addresses for per-queue dequeue setting. */
+ #define PPE_DEQ_OPR_TBL_ADDR 0x430000
+ #define PPE_DEQ_OPR_TBL_ENTRIES 300
--- /dev/null
+From cf7282d1e5712953516fa1cc0ffaae405491b3ca Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:46 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Initialize PPE L2 bridge settings
+
+Initialize the L2 bridge settings for the PPE ports to only enable
+L2 frame forwarding between the CPU port and the PPE Ethernet ports.
+
+The per-port L2 bridge settings are initialized as follows:
+For the PPE CPU port, the PPE bridge TX is enabled and FDB learning is
+disabled. For the PPE physical ports, the default L2 forwarding action
+is initialized to forward to the CPU port only.
+
+L2/FDB learning and forwarding will not be enabled for PPE physical
+ports yet, since the port's VSI (Virtual Switch Instance) and VSI
+membership are not yet configured, which are required for FDB
+forwarding. The VSI and FDB forwarding will later be enabled when
+switchdev is enabled.
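+
+As an illustrative sketch, the CPU port0 setting amounts to a single
+masked register update using the fields introduced by this patch:
+
+	u32 mask = PPE_PORT_BRIDGE_TXMAC_EN | PPE_PORT_BRIDGE_NEW_LRN_EN |
+		   PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
+	int ret;
+
+	/* Enable bridge TX and clear both learning bits for CPU port0. */
+	ret = regmap_update_bits(ppe_dev->regmap, PPE_PORT_BRIDGE_CTRL_ADDR,
+				 mask, PPE_PORT_BRIDGE_TXMAC_EN);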
+
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c | 80 ++++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 50 ++++++++++++
+ 2 files changed, 129 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1888,6 +1888,80 @@ static int ppe_queues_to_ring_init(struc
+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+ }
+
++/* Initialize the PPE bridge settings to only enable L2 frame receive and
++ * transmit between the CPU port and the PPE Ethernet ports.
++ */
++static int ppe_bridge_init(struct ppe_device *ppe_dev)
++{
++ u32 reg, mask, port_cfg[4], vsi_cfg[2];
++ int ret, i;
++
++ /* Configure the following settings for CPU port0:
++ * a.) Enable Bridge TX
++ * b.) Disable FDB new address learning
++ * c.) Disable station move address learning
++ */
++ mask = PPE_PORT_BRIDGE_TXMAC_EN;
++ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
++ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
++ ret = regmap_update_bits(ppe_dev->regmap,
++ PPE_PORT_BRIDGE_CTRL_ADDR,
++ mask,
++ PPE_PORT_BRIDGE_TXMAC_EN);
++ if (ret)
++ return ret;
++
++ for (i = 1; i < ppe_dev->num_ports; i++) {
++ /* Enable invalid VSI forwarding for all the physical ports
++ * to CPU port0, in case no VSI is assigned to the physical
++ * port.
++ */
++ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ port_cfg, ARRAY_SIZE(port_cfg));
++ if (ret)
++ return ret;
++
++ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
++ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ port_cfg, ARRAY_SIZE(port_cfg));
++ if (ret)
++ return ret;
++ }
++
++ for (i = 0; i < PPE_VSI_TBL_ENTRIES; i++) {
++ /* Set the VSI forward membership to include only CPU port0.
++ * FDB learning and forwarding will only take place once switchdev
++ * support is added later, which creates the VSIs and joins the
++ * physical ports to the VSI port members.
++ */
++ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ vsi_cfg, ARRAY_SIZE(vsi_cfg));
++ if (ret)
++ return ret;
++
++ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
++ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
++ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
++ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
++ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
++ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
++ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
++ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
++
++ ret = regmap_bulk_write(ppe_dev->regmap, reg,
++ vsi_cfg, ARRAY_SIZE(vsi_cfg));
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+ int ret;
+@@ -1920,5 +1994,9 @@ int ppe_hw_config(struct ppe_device *ppe
+ if (ret)
+ return ret;
+
+- return ppe_queues_to_ring_init(ppe_dev);
++ ret = ppe_queues_to_ring_init(ppe_dev);
++ if (ret)
++ return ret;
++
++ return ppe_bridge_init(ppe_dev);
+ }
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -117,6 +117,14 @@
+ #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
+
++/* PPE port bridge configuration */
++#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
++#define PPE_PORT_BRIDGE_CTRL_ENTRIES 8
++#define PPE_PORT_BRIDGE_CTRL_INC 4
++#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
++#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
++#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
++
+ /* PPE port control configurations for the traffic to the multicast queues. */
+ #define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
+ #define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
+@@ -125,6 +133,36 @@
+ #define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
+ #define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
+
++/* PPE VSI configurations */
++#define PPE_VSI_TBL_ADDR 0x63800
++#define PPE_VSI_TBL_ENTRIES 64
++#define PPE_VSI_TBL_INC 0x10
++#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
++#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
++#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
++#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
++#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
++#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
++#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
++#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
++
++#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_MEMBER_PORT_BITMAP)
++#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UUC_BITMAP)
++#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UMC_BITMAP)
++#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_BC_BITMAP)
++#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_LRN_EN)
++#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_FWD_CMD)
++#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_LRN_EN)
++#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_FWD_CMD)
++
+ /* PPE port control configurations for the traffic to the unicast queues. */
+ #define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
+ #define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
+@@ -163,6 +201,18 @@
+ #define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
+ #define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
+
++/* L2 Port configurations */
++#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
++#define PPE_L2_VP_PORT_TBL_ENTRIES 256
++#define PPE_L2_VP_PORT_TBL_INC 0x10
++#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
++#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
++
++#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN)
++#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
++ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
++
+ /* PPE service code configuration for the tunnel packet. */
+ #define PPE_TL_SERVICE_TBL_ADDR 0x306000
+ #define PPE_TL_SERVICE_TBL_ENTRIES 256
--- /dev/null
+From fc25088f79cccb934d69e563221068589565926f Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:47 +0800
+Subject: [PATCH] net: ethernet: qualcomm: Add PPE debugfs support for PPE
+ counters
+
+The PPE hardware maintains counters for the packets handled by the
+various functional blocks of the PPE. They help in tracing the packets
+passing through the PPE and in debugging any packet drops.
+
+The counters displayed by this debugfs file are the ones common to all
+Ethernet ports; they do not include the counters that are specific to
+a MAC port, and hence cannot be displayed using ethtool. The per-MAC
+counters will be supported using "ethtool -S" along with the netdevice
+driver.
+
+The PPE hardware packet counters are made available through
+the debugfs entry "/sys/kernel/debug/ppe/packet_counters".
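+
+Example usage, assuming debugfs is mounted at /sys/kernel/debug:
+
+	# Display the non-zero PPE packet counters.
+	cat /sys/kernel/debug/ppe/packet_counters
+
+	# Writing any value to the file clears all the counters.
+	echo clear > /sys/kernel/debug/ppe/packet_counters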
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe.c | 11 +
+ drivers/net/ethernet/qualcomm/ppe/ppe.h | 3 +
+ .../net/ethernet/qualcomm/ppe/ppe_debugfs.c | 692 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/ppe_debugfs.h | 16 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 102 +++
+ 6 files changed, 825 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
+
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o ppe_config.o
++qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -16,6 +16,7 @@
+
+ #include "ppe.h"
+ #include "ppe_config.h"
++#include "ppe_debugfs.h"
+
+ #define PPE_PORT_MAX 8
+ #define PPE_CLK_RATE 353000000
+@@ -199,11 +200,20 @@ static int qcom_ppe_probe(struct platfor
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
+
++ ppe_debugfs_setup(ppe_dev);
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+ }
+
++static void qcom_ppe_remove(struct platform_device *pdev)
++{
++ struct ppe_device *ppe_dev;
++
++ ppe_dev = platform_get_drvdata(pdev);
++ ppe_debugfs_teardown(ppe_dev);
++}
++
+ static const struct of_device_id qcom_ppe_of_match[] = {
+ { .compatible = "qcom,ipq9574-ppe" },
+ {}
+@@ -216,6 +226,7 @@ static struct platform_driver qcom_ppe_d
+ .of_match_table = qcom_ppe_of_match,
+ },
+ .probe = qcom_ppe_probe,
++ .remove = qcom_ppe_remove,
+ };
+ module_platform_driver(qcom_ppe_driver);
+
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+@@ -11,6 +11,7 @@
+
+ struct device;
+ struct regmap;
++struct dentry;
+
+ /**
+ * struct ppe_device - PPE device private data.
+@@ -18,6 +19,7 @@ struct regmap;
+ * @regmap: PPE register map.
+ * @clk_rate: PPE clock rate.
+ * @num_ports: Number of PPE ports.
++ * @debugfs_root: Debugfs root entry.
+ * @num_icc_paths: Number of interconnect paths.
+ * @icc_paths: Interconnect path array.
+ *
+@@ -30,6 +32,7 @@ struct ppe_device {
+ struct regmap *regmap;
+ unsigned long clk_rate;
+ unsigned int num_ports;
++ struct dentry *debugfs_root;
+ unsigned int num_icc_paths;
+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+ };
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+@@ -0,0 +1,692 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE debugfs routines for displaying the PPE counters useful for debugging. */
++
++#include <linux/bitfield.h>
++#include <linux/debugfs.h>
++#include <linux/regmap.h>
++#include <linux/seq_file.h>
++
++#include "ppe.h"
++#include "ppe_config.h"
++#include "ppe_debugfs.h"
++#include "ppe_regs.h"
++
++#define PPE_PKT_CNT_TBL_SIZE 3
++#define PPE_DROP_PKT_CNT_TBL_SIZE 5
++
++#define PPE_W0_PKT_CNT GENMASK(31, 0)
++#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
++#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
++
++#define PPE_GET_PKT_CNT(tbl_cnt) \
++ u32_get_bits(*((u32 *)(tbl_cnt)), PPE_W0_PKT_CNT)
++#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) \
++ u32_get_bits(*((u32 *)(tbl_cnt) + 0x2), PPE_W2_DROP_PKT_CNT_LOW)
++#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) \
++ u32_get_bits(*((u32 *)(tbl_cnt) + 0x3), PPE_W3_DROP_PKT_CNT_HIGH)
++
++#define PRINT_COUNTER_PREFIX(desc, cnt_type) \
++ seq_printf(seq, "%-16s %16s", desc, cnt_type)
++
++#define PRINT_CPU_CODE_COUNTER(cnt, code) \
++ seq_printf(seq, "%10u(cpucode:%d)", cnt, code)
++
++#define PRINT_DROP_CODE_COUNTER(cnt, port, code) \
++ seq_printf(seq, "%10u(port=%d),dropcode:%d", cnt, port, code)
++
++#define PRINT_SINGLE_COUNTER(tag, cnt, str, index) \
++do { \
++ if (!((tag) % 4)) \
++ seq_printf(seq, "\n%-16s %16s", "", ""); \
++ seq_printf(seq, "%10u(%s=%04d)", cnt, str, index); \
++} while (0)
++
++#define PRINT_TWO_COUNTERS(tag, cnt0, cnt1, str, index) \
++do { \
++ if (!((tag) % 4)) \
++ seq_printf(seq, "\n%-16s %16s", "", ""); \
++ seq_printf(seq, "%10u/%u(%s=%04d)", cnt0, cnt1, str, index); \
++} while (0)
++
++/**
++ * enum ppe_cnt_size_type - PPE counter size type
++ * @PPE_PKT_CNT_SIZE_1WORD: Counter stored in a single register
++ * @PPE_PKT_CNT_SIZE_3WORD: Counter stored in a table of 3 words
++ * @PPE_PKT_CNT_SIZE_5WORD: Counter stored in a table of 5 words
++ *
++ * PPE uses registers of different sizes to record the packet counters:
++ * either a single register, or a register table of 3 or 5 words. The
++ * counter with the 5-word table also records the drop counter. Some
++ * other counter types occupy less than 32 bits and are not covered by
++ * this enumeration type.
++ */
++enum ppe_cnt_size_type {
++ PPE_PKT_CNT_SIZE_1WORD,
++ PPE_PKT_CNT_SIZE_3WORD,
++ PPE_PKT_CNT_SIZE_5WORD,
++};
++
++static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
++ enum ppe_cnt_size_type cnt_type,
++ u32 *cnt, u32 *drop_cnt)
++{
++ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
++ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
++ u32 value;
++ int ret;
++
++ switch (cnt_type) {
++ case PPE_PKT_CNT_SIZE_1WORD:
++ ret = regmap_read(ppe_dev->regmap, reg, &value);
++ if (ret)
++ return ret;
++
++ *cnt = value;
++ break;
++ case PPE_PKT_CNT_SIZE_3WORD:
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ pkt_cnt, ARRAY_SIZE(pkt_cnt));
++ if (ret)
++ return ret;
++
++ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
++ break;
++ case PPE_PKT_CNT_SIZE_5WORD:
++ ret = regmap_bulk_read(ppe_dev->regmap, reg,
++ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
++ if (ret)
++ return ret;
++
++ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
++
++ /* Drop counter with low 24 bits. */
++ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
++ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
++
++ /* Drop counter with high 8 bits. */
++ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
++ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
++ break;
++ }
++
++ return 0;
++}
++
++static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
++ enum ppe_cnt_size_type cnt_type)
++{
++ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
++ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
++
++ switch (cnt_type) {
++ case PPE_PKT_CNT_SIZE_1WORD:
++ regmap_write(ppe_dev->regmap, reg, 0);
++ break;
++ case PPE_PKT_CNT_SIZE_3WORD:
++ regmap_bulk_write(ppe_dev->regmap, reg,
++ pkt_cnt, ARRAY_SIZE(pkt_cnt));
++ break;
++ case PPE_PKT_CNT_SIZE_5WORD:
++ regmap_bulk_write(ppe_dev->regmap, reg,
++ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
++ break;
++ }
++}
++
++/* The number of packets dropped silently because no PPE buffer was
++ * available to assign to these packets.
++ */
++static void ppe_port_rx_drop_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("PRX_DROP_CNT", "SILENT_DROP:");
++ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++ &drop_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (drop_cnt > 0) {
++ tag++;
++ PRINT_SINGLE_COUNTER(tag, drop_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets dropped because the hardware buffers were only
++ * partially available for the packet.
++ */
++static void ppe_port_rx_bm_drop_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("PRX_BM_DROP_CNT", "OVERFLOW_DROP:");
++ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
++
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_SINGLE_COUNTER(tag, pkt_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of currently occupied buffers that can't be flushed. */
++static void ppe_port_rx_bm_port_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ int used_cnt, react_cnt;
++ int ret, i, tag = 0;
++ u32 reg, val;
++
++ PRINT_COUNTER_PREFIX("PRX_BM_PORT_CNT", "USED/REACT:");
++ for (i = 0; i < PPE_BM_USED_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_BM_USED_CNT_TBL_ADDR + i * PPE_BM_USED_CNT_TBL_INC;
++ ret = regmap_read(ppe_dev->regmap, reg, &val);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ /* The number of PPE buffers used for caching the received
++ * packets before the pause frame is sent.
++ */
++ used_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
++
++ reg = PPE_BM_REACT_CNT_TBL_ADDR + i * PPE_BM_REACT_CNT_TBL_INC;
++ ret = regmap_read(ppe_dev->regmap, reg, &val);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ /* The number of PPE buffers used for caching the received
++ * packets after the pause frame is sent out.
++ */
++ react_cnt = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
++
++ if (used_cnt > 0 || react_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, used_cnt, react_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets processed by the ingress parser module of PPE. */
++static void ppe_parse_pkt_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, cnt = 0, tunnel_cnt = 0;
++ int i, ret, tag = 0;
++
++ PRINT_COUNTER_PREFIX("IPR_PKT_CNT", "TPRX/IPRX:");
++ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++ &tunnel_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++ &cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (tunnel_cnt > 0 || cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, tunnel_cnt, cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped on the ingress direction. */
++static void ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("PORT_RX_CNT", "RX/RX_DROP:");
++ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++ &pkt_cnt, &drop_cnt);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, drop_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped by the port. */
++static void ppe_vp_rx_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("VPORT_RX_CNT", "RX/RX_DROP:");
++ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++ &pkt_cnt, &drop_cnt);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, drop_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped by layer 2 processing. */
++static void ppe_pre_l2_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("PRE_L2_CNT", "RX/RX_DROP:");
++ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++ &pkt_cnt, &drop_cnt);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, drop_cnt, "vsi", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of VLAN packets received by PPE. */
++static void ppe_vlan_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("VLAN_CNT", "RX:");
++ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
++
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_SINGLE_COUNTER(tag, pkt_cnt, "vsi", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets handed to the CPU by the PPE. */
++static void ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0;
++ int ret, i;
++
++ PRINT_COUNTER_PREFIX("CPU_CODE_CNT", "CODE:");
++ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
++
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (!pkt_cnt)
++ continue;
++
++ /* The first 256 entries of the register table hold the 256
++ * CPU codes, followed by 128 drop codes for each of the 8
++ * PPE ports (0-7), giving 256 + 8 * 128 entries in total.
++ */
++ if (i < 256)
++ PRINT_CPU_CODE_COUNTER(pkt_cnt, i);
++ else
++ PRINT_DROP_CODE_COUNTER(pkt_cnt, (i - 256) % 8,
++ (i - 256) / 8);
++ seq_putc(seq, '\n');
++ PRINT_COUNTER_PREFIX("", "");
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets forwarded by VLAN on the egress direction. */
++static void ppe_eg_vsi_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("EG_VSI_CNT", "TX:");
++ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
++
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0) {
++ tag++;
++ PRINT_SINGLE_COUNTER(tag, pkt_cnt, "vsi", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or dropped by the port. */
++static void ppe_vp_tx_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("VPORT_TX_CNT", "TX/TX_DROP:");
++ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &drop_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0 || drop_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, drop_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or dropped on the egress direction. */
++static void ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, pkt_cnt = 0, drop_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("PORT_TX_CNT", "TX/TX_DROP:");
++ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &drop_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (pkt_cnt > 0 || drop_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, drop_cnt, "port", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or pending by the PPE queue. */
++static void ppe_queue_tx_counter_get(struct ppe_device *ppe_dev,
++ struct seq_file *seq)
++{
++ u32 reg, val, pkt_cnt = 0, pend_cnt = 0;
++ int ret, i, tag = 0;
++
++ PRINT_COUNTER_PREFIX("QUEUE_TX_CNT", "TX/PEND:");
++ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
++ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++ &pkt_cnt, NULL);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ if (i < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
++ reg = PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR +
++ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC * i;
++ ret = regmap_read(ppe_dev->regmap, reg, &val);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ pend_cnt = FIELD_GET(PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT, val);
++ } else {
++ reg = PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR +
++ PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC *
++ (i - PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES);
++ ret = regmap_read(ppe_dev->regmap, reg, &val);
++ if (ret) {
++ seq_printf(seq, "ERROR %d\n", ret);
++ return;
++ }
++
++ pend_cnt = FIELD_GET(PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT, val);
++ }
++
++ if (pkt_cnt > 0 || pend_cnt > 0) {
++ tag++;
++ PRINT_TWO_COUNTERS(tag, pkt_cnt, pend_cnt, "queue", i);
++ }
++ }
++
++ seq_putc(seq, '\n');
++}
++
++/* Display the various packet counters of PPE. */
++static int ppe_packet_counter_show(struct seq_file *seq, void *v)
++{
++ struct ppe_device *ppe_dev = seq->private;
++
++ ppe_port_rx_drop_counter_get(ppe_dev, seq);
++ ppe_port_rx_bm_drop_counter_get(ppe_dev, seq);
++ ppe_port_rx_bm_port_counter_get(ppe_dev, seq);
++ ppe_parse_pkt_counter_get(ppe_dev, seq);
++ ppe_port_rx_counter_get(ppe_dev, seq);
++ ppe_vp_rx_counter_get(ppe_dev, seq);
++ ppe_pre_l2_counter_get(ppe_dev, seq);
++ ppe_vlan_counter_get(ppe_dev, seq);
++ ppe_cpu_code_counter_get(ppe_dev, seq);
++ ppe_eg_vsi_counter_get(ppe_dev, seq);
++ ppe_vp_tx_counter_get(ppe_dev, seq);
++ ppe_port_tx_counter_get(ppe_dev, seq);
++ ppe_queue_tx_counter_get(ppe_dev, seq);
++
++ return 0;
++}
++
++static int ppe_packet_counter_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, ppe_packet_counter_show, inode->i_private);
++}
++
++static ssize_t ppe_packet_counter_clear(struct file *file,
++ const char __user *buf,
++ size_t count, loff_t *pos)
++{
++ struct ppe_device *ppe_dev = file_inode(file)->i_private;
++ u32 reg;
++ int i;
++
++ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++ }
++
++ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++
++ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++ }
++
++ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++ }
++
++ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++
++ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++
++ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
++ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_IN_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
++ ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_OUT_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
++
++ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++ }
++
++ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++ }
++
++ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
++ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
++ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++ }
++
++ return count;
++}
++
++static const struct file_operations ppe_debugfs_packet_counter_fops = {
++ .owner = THIS_MODULE,
++ .open = ppe_packet_counter_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ .write = ppe_packet_counter_clear,
++};
++
++void ppe_debugfs_setup(struct ppe_device *ppe_dev)
++{
++ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
++ debugfs_create_file("packet_counters", 0644,
++ ppe_dev->debugfs_root,
++ ppe_dev,
++ &ppe_debugfs_packet_counter_fops);
++}
++
++void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
++{
++ debugfs_remove_recursive(ppe_dev->debugfs_root);
++ ppe_dev->debugfs_root = NULL;
++}
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE debugfs counters setup. */
++
++#ifndef __PPE_DEBUGFS_H__
++#define __PPE_DEBUGFS_H__
++
++#include "ppe.h"
++
++void ppe_debugfs_setup(struct ppe_device *ppe_dev);
++void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
++
++#endif
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -16,6 +16,39 @@
+ #define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+ #define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
++/* PPE drop counters. */
++#define PPE_DROP_CNT_TBL_ADDR 0xb024
++#define PPE_DROP_CNT_TBL_ENTRIES 8
++#define PPE_DROP_CNT_TBL_INC 4
++
++/* BM port drop counters. */
++#define PPE_DROP_STAT_TBL_ADDR 0xe000
++#define PPE_DROP_STAT_TBL_ENTRIES 30
++#define PPE_DROP_STAT_TBL_INC 0x10
++
++#define PPE_EPE_DBG_IN_CNT_ADDR 0x26054
++#define PPE_EPE_DBG_OUT_CNT_ADDR 0x26070
++
++/* Egress VLAN counters. */
++#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
++#define PPE_EG_VSI_COUNTER_TBL_ENTRIES 64
++#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
++
++/* Port TX counters. */
++#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
++#define PPE_PORT_TX_COUNTER_TBL_ENTRIES 8
++#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
++
++/* Virtual port TX counters. */
++#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
++#define PPE_VPORT_TX_COUNTER_TBL_ENTRIES 256
++#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
++
++/* Queue counters. */
++#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
++#define PPE_QUEUE_TX_COUNTER_TBL_ENTRIES 300
++#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
++
+ /* RSS settings are to calculate the random RSS hash value generated during
+ * packet receive to ARM cores. This hash is then used to generate the queue
+ * offset used to determine the queue used to transmit the packet to ARM cores.
+@@ -213,6 +246,51 @@
+ #define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
+
++/* Port RX and RX drop counters. */
++#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
++#define PPE_PORT_RX_CNT_TBL_ENTRIES 256
++#define PPE_PORT_RX_CNT_TBL_INC 0x20
++
++/* Physical port RX and RX drop counters. */
++#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
++#define PPE_PHY_PORT_RX_CNT_TBL_ENTRIES 8
++#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
++
++/* Counters for the packets sent to the CPU port. */
++#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
++#define PPE_DROP_CPU_CNT_TBL_ENTRIES 1280
++#define PPE_DROP_CPU_CNT_TBL_INC 0x10
++
++/* VLAN counters. */
++#define PPE_VLAN_CNT_TBL_ADDR 0x178000
++#define PPE_VLAN_CNT_TBL_ENTRIES 64
++#define PPE_VLAN_CNT_TBL_INC 0x10
++
++/* PPE L2 counters. */
++#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
++#define PPE_PRE_L2_CNT_TBL_ENTRIES 64
++#define PPE_PRE_L2_CNT_TBL_INC 0x20
++
++/* Port TX drop counters. */
++#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
++#define PPE_PORT_TX_DROP_CNT_TBL_ENTRIES 8
++#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
++
++/* Virtual port TX drop counters. */
++#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
++#define PPE_VPORT_TX_DROP_CNT_TBL_ENTRIES 256
++#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
++
++/* Counters for tunnel packets. */
++#define PPE_TPR_PKT_CNT_TBL_ADDR 0x1d0080
++#define PPE_TPR_PKT_CNT_TBL_ENTRIES 8
++#define PPE_TPR_PKT_CNT_TBL_INC 4
++
++/* Counters for all packets received. */
++#define PPE_IPR_PKT_CNT_TBL_ADDR 0x1e0080
++#define PPE_IPR_PKT_CNT_TBL_ENTRIES 8
++#define PPE_IPR_PKT_CNT_TBL_INC 4
++
+ /* PPE service code configuration for the tunnel packet. */
+ #define PPE_TL_SERVICE_TBL_ADDR 0x306000
+ #define PPE_TL_SERVICE_TBL_ENTRIES 256
+@@ -325,6 +403,18 @@
+ #define PPE_BM_PORT_GROUP_ID_INC 0x4
+ #define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
+
++/* Counters for PPE buffers used to cache packets. */
++#define PPE_BM_USED_CNT_TBL_ADDR 0x6001c0
++#define PPE_BM_USED_CNT_TBL_ENTRIES 15
++#define PPE_BM_USED_CNT_TBL_INC 0x4
++#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
++
++/* Counters for PPE buffers used for packets received after a pause frame is sent. */
++#define PPE_BM_REACT_CNT_TBL_ADDR 0x600240
++#define PPE_BM_REACT_CNT_TBL_ENTRIES 15
++#define PPE_BM_REACT_CNT_TBL_INC 0x4
++#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
++
+ #define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
+ #define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
+ #define PPE_BM_SHARED_GROUP_CFG_INC 0x4
+@@ -449,6 +539,18 @@
+ #define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
+
++/* Counters for packets handled by unicast queues (0-255). */
++#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR 0x84e000
++#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ENTRIES 256
++#define PPE_AC_UNICAST_QUEUE_CNT_TBL_INC 0x10
++#define PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
++
++/* Counters for packets handled by multicast queues (256-299). */
++#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR 0x852000
++#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ENTRIES 44
++#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC 0x10
++#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
++
+ /* Table addresses for per-queue enqueue setting. */
+ #define PPE_ENQ_OPR_TBL_ADDR 0x85c000
+ #define PPE_ENQ_OPR_TBL_ENTRIES 300
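
Each ADDR/ENTRIES/INC triple above describes a fixed-stride register table:
entry i sits at ADDR + i * INC, and the GENMASK() fields (for example
PPE_BM_USED_CNT_VAL) mark the bits that carry the live value within a word.
A hedged sketch of the access arithmetic, assuming the ppe_dev->regmap
accessor used elsewhere in the series:

	#include <linux/bitfield.h>
	#include <linux/regmap.h>

	/* Read the in-use buffer count for BM port @port (0-14). */
	static int ppe_bm_used_cnt_get(struct ppe_device *ppe_dev, int port,
				       u32 *cnt)
	{
		u32 reg = PPE_BM_USED_CNT_TBL_ADDR +
			  port * PPE_BM_USED_CNT_TBL_INC;
		u32 val;
		int ret;

		ret = regmap_read(ppe_dev->regmap, reg, &val);
		if (ret)
			return ret;

		/* Only bits 10:0 hold the count; the rest are reserved. */
		*cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
		return 0;
	}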
--- /dev/null
+From 28098c348414fa97531449d4e27ba1587e67c2d9 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Sun, 9 Feb 2025 22:29:48 +0800
+Subject: [PATCH] MAINTAINERS: Add maintainer for Qualcomm PPE driver
+
+Add a MAINTAINERS entry for the PPE (Packet Process Engine) driver
+used on Qualcomm IPQ SoCs.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ MAINTAINERS | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -19131,6 +19131,14 @@ S: Maintained
+ F: Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
+ F: drivers/mtd/nand/raw/qcom_nandc.c
+
++QUALCOMM PPE DRIVER
++M: Luo Jie <quic_luoj@quicinc.com>
++L: netdev@vger.kernel.org
++S: Supported
++F: Documentation/devicetree/bindings/net/qcom,ipq9574-ppe.yaml
++F: Documentation/networking/device_drivers/ethernet/qualcomm/ppe/ppe.rst
++F: drivers/net/ethernet/qualcomm/ppe/
++
+ QUALCOMM QSEECOM DRIVER
+ M: Maximilian Luz <luzmaximilian@gmail.com>
+ L: linux-arm-msm@vger.kernel.org