git.ipfire.org Git - thirdparty/openwrt.git/commitdiff
airoha: switch to 6.12 and drop 6.6 [20137/head]
author Andrew LaMarche <andrewjlamarche@gmail.com>
Tue, 23 Sep 2025 14:15:40 +0000 (10:15 -0400)
committer Robert Marko <robimarko@gmail.com>
Thu, 25 Sep 2025 09:00:36 +0000 (11:00 +0200)
Switch Airoha targets to 6.12 and drop 6.6 support.

Signed-off-by: Andrew LaMarche <andrewjlamarche@gmail.com>
Link: https://github.com/openwrt/openwrt/pull/20137
Signed-off-by: Robert Marko <robimarko@gmail.com>
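
For anyone carrying downstream changes on the airoha target, a quick local sanity check of the kernel bump might look like the following (a minimal sketch, assuming an OpenWrt checkout with an airoha subtarget already selected in .config):

    # confirm the default kernel version after this commit
    grep KERNEL_PATCHVER target/linux/airoha/Makefile   # expect: KERNEL_PATCHVER:=6.12
    # refresh the remaining patches-6.12 series against the new kernel
    make target/linux/refresh
    # build-test the kernel for the selected subtarget
    make target/linux/compile V=s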
159 files changed:
target/linux/airoha/Makefile
target/linux/airoha/an7581/config-6.6 [deleted file]
target/linux/airoha/en7523/config-6.6 [deleted file]
target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch [deleted file]
target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch [deleted file]
target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch [deleted file]
target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch [deleted file]
target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch [deleted file]
target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch [deleted file]
target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch [deleted file]
target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch [deleted file]
target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch [deleted file]
target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch [deleted file]
target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch [deleted file]
target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch [deleted file]
target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch [deleted file]
target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch [deleted file]
target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch [deleted file]
target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch [deleted file]
target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch [deleted file]
target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch [deleted file]
target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch [deleted file]
target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch [deleted file]
target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch [deleted file]
target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch [deleted file]
target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch [deleted file]
target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch [deleted file]
target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch [deleted file]
target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch [deleted file]
target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch [deleted file]
target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch [deleted file]
target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch [deleted file]
target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch [deleted file]
target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch [deleted file]
target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch [deleted file]
target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch [deleted file]
target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch [deleted file]
target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch [deleted file]
target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch [deleted file]
target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch [deleted file]
target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch [deleted file]
target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch [deleted file]
target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch [deleted file]
target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch [deleted file]
target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch [deleted file]
target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch [deleted file]
target/linux/airoha/patches-6.6/030-v6.13-hwrng-airoha-add-support-for-Airoha-EN7581-TRNG.patch [deleted file]
target/linux/airoha/patches-6.6/031-01-v6.13-net-airoha-Read-completion-queue-data-in-airoha_qdma.patch [deleted file]
target/linux/airoha/patches-6.6/031-02-v6.13-net-airoha-Simplify-Tx-napi-logic.patch [deleted file]
target/linux/airoha/patches-6.6/032-v6.13-watchdog-Add-support-for-Airoha-EN7851-watchdog.patch [deleted file]
target/linux/airoha/patches-6.6/033-01-v6.13-clk-en7523-remove-REG_PCIE-_-MEM-MEM_MASK-configurat.patch [deleted file]
target/linux/airoha/patches-6.6/033-02-v6.13-clk-en7523-move-clock_register-in-hw_init-callback.patch [deleted file]
target/linux/airoha/patches-6.6/033-03-v6.13-clk-en7523-introduce-chip_scu-regmap.patch [deleted file]
target/linux/airoha/patches-6.6/033-04-v6.13-clk-en7523-fix-estimation-of-fixed-rate-for-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/033-05-v6.13-clk-en7523-move-en7581_reset_register-in-en7581_clk_.patch [deleted file]
target/linux/airoha/patches-6.6/033-06-v6.13-clk-en7523-map-io-region-in-a-single-block.patch [deleted file]
target/linux/airoha/patches-6.6/034-v6.13-pinctrl-airoha-Add-support-for-EN7581-SoC.patch [deleted file]
target/linux/airoha/patches-6.6/035-v6.13-clk-en7523-Fix-wrong-BUS-clock-for-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/036-v6.13-net-airoha-Fix-typo-in-REG_CDM2_FWD_CFG-configuratio.patch [deleted file]
target/linux/airoha/patches-6.6/037-v6.14-net-airoha-Fix-error-path-in-airoha_probe.patch [deleted file]
target/linux/airoha/patches-6.6/038-01-v6.14-net-airoha-Enable-Tx-drop-capability-for-each-Tx-DMA.patch [deleted file]
target/linux/airoha/patches-6.6/038-02-v6.14-net-airoha-Introduce-ndo_select_queue-callback.patch [deleted file]
target/linux/airoha/patches-6.6/038-03-v6.14-net-airoha-Add-sched-ETS-offload-support.patch [deleted file]
target/linux/airoha/patches-6.6/038-04-v6.14-net-airoha-Add-sched-HTB-offload-support.patch [deleted file]
target/linux/airoha/patches-6.6/039-v6.14-cpufreq-airoha-Add-EN7581-CPUFreq-SMCCC-driver.patch [deleted file]
target/linux/airoha/patches-6.6/039-v6.14-net-airoha-Enforce-ETS-Qdisc-priomap.patch [deleted file]
target/linux/airoha/patches-6.6/040-v6.14-pmdomain-airoha-Add-Airoha-CPU-PM-Domain-support.patch [deleted file]
target/linux/airoha/patches-6.6/041-01-v6.14-clk-en7523-Rework-clock-handling-for-different-clock.patch [deleted file]
target/linux/airoha/patches-6.6/041-02-v6.14-dt-bindings-clock-drop-NUM_CLOCKS-define-for-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/041-03-v6.14-dt-bindings-clock-add-ID-for-eMMC-for-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/041-04-v6.14-clk-en7523-Add-clock-for-eMMC-for-EN7581.patch [deleted file]
target/linux/airoha/patches-6.6/042-01-v6.14-PCI-mediatek-gen3-Rely-on-clk_bulk_prepare_enable-in.patch [deleted file]
target/linux/airoha/patches-6.6/042-02-v6.14-PCI-mediatek-gen3-Move-reset-assert-callbacks-in-.po.patch [deleted file]
target/linux/airoha/patches-6.6/042-03-v6.14-PCI-mediatek-gen3-Add-comment-about-initialization-o.patch [deleted file]
target/linux/airoha/patches-6.6/042-04-v6.14-PCI-mediatek-gen3-Move-reset-delay-in-mtk_pcie_en758.patch [deleted file]
target/linux/airoha/patches-6.6/042-05-v6.14-PCI-mediatek-gen3-Rely-on-msleep-in-mtk_pcie_en7581_.patch [deleted file]
target/linux/airoha/patches-6.6/042-06-v6.14-PCI-mediatek-gen3-Avoid-PCIe-resetting-via-PERST-for.patch [deleted file]
target/linux/airoha/patches-6.6/043-v6.15-PCI-mediatek-gen3-Remove-leftover-mac_reset-assert-f.patch [deleted file]
target/linux/airoha/patches-6.6/044-v6.15-PCI-mediatek-gen3-Configure-PBUS_CSR-registers-for-E.patch [deleted file]
target/linux/airoha/patches-6.6/045-v6.14-net-airoha-Fix-wrong-GDM4-register-definition.patch [deleted file]
target/linux/airoha/patches-6.6/046-v6.15-net-airoha-Fix-TSO-support-for-header-cloned-skbs.patch [deleted file]
target/linux/airoha/patches-6.6/047-v6.13-net-airoha-Reset-BQL-stopping-the-netdevice.patch [deleted file]
target/linux/airoha/patches-6.6/048-01-v6.15-net-airoha-Move-airoha_eth-driver-in-a-dedicated-fol.patch [deleted file]
target/linux/airoha/patches-6.6/048-02-v6.15-net-airoha-Move-definitions-in-airoha_eth.h.patch [deleted file]
target/linux/airoha/patches-6.6/048-03-v6.15-net-airoha-Move-reg-write-utility-routines-in-airoha.patch [deleted file]
target/linux/airoha/patches-6.6/048-04-v6.15-net-airoha-Move-register-definitions-in-airoha_regs..patch [deleted file]
target/linux/airoha/patches-6.6/048-05-v6.15-net-airoha-Move-DSA-tag-in-DMA-descriptor.patch [deleted file]
target/linux/airoha/patches-6.6/048-06-v6.15-net-dsa-mt7530-Enable-Rx-sptag-for-EN7581-SoC.patch [deleted file]
target/linux/airoha/patches-6.6/048-07-v6.15-net-airoha-Enable-support-for-multiple-net_devices.patch [deleted file]
target/linux/airoha/patches-6.6/048-08-v6.15-net-airoha-Move-REG_GDM_FWD_CFG-initialization-in-ai.patch [deleted file]
target/linux/airoha/patches-6.6/048-09-v6.15-net-airoha-Rename-airoha_set_gdm_port_fwd_cfg-in-air.patch [deleted file]
target/linux/airoha/patches-6.6/048-12-v6.15-net-airoha-Introduce-Airoha-NPU-support.patch [deleted file]
target/linux/airoha/patches-6.6/048-13-v6.15-net-airoha-Introduce-flowtable-offload-support.patch [deleted file]
target/linux/airoha/patches-6.6/048-14-v6.15-net-airoha-Add-loopback-support-for-GDM2.patch [deleted file]
target/linux/airoha/patches-6.6/048-15-v6.15-net-airoha-Introduce-PPE-debugfs-support.patch [deleted file]
target/linux/airoha/patches-6.6/049-01-v6.16-thermal-drivers-Add-support-for-Airoha-EN7581-therma.patch [deleted file]
target/linux/airoha/patches-6.6/049-02-v6.16-thermal-drivers-airoha-Fix-spelling-mistake.patch [deleted file]
target/linux/airoha/patches-6.6/051-v6.15-pinctrl-airoha-fix-wrong-PHY-LED-mapping-and-PHY2-LE.patch [deleted file]
target/linux/airoha/patches-6.6/060-v6.16-02-net-phy-mediatek-add-Airoha-PHY-ID-to-SoC-driver.patch [deleted file]
target/linux/airoha/patches-6.6/063-01-v6.15-net-airoha-Move-min-max-packet-len-configuration-in-.patch [deleted file]
target/linux/airoha/patches-6.6/063-02-v6.15-net-airoha-Enable-Rx-Scatter-Gather.patch [deleted file]
target/linux/airoha/patches-6.6/063-03-v6.15-net-airoha-Introduce-airoha_dev_change_mtu-callback.patch [deleted file]
target/linux/airoha/patches-6.6/063-04-v6.15-net-airoha-Increase-max-mtu-to-9k.patch [deleted file]
target/linux/airoha/patches-6.6/063-05-v6.15-net-airoha-Fix-lan4-support-in-airoha_qdma_get_gdm_p.patch [deleted file]
target/linux/airoha/patches-6.6/063-06-v6.15-net-airoha-Enable-TSO-Scatter-Gather-for-LAN-port.patch [deleted file]
target/linux/airoha/patches-6.6/064-v6.15-net-airoha-Fix-dev-dsa_ptr-check-in-airoha_get_dsa_t.patch [deleted file]
target/linux/airoha/patches-6.6/065-v6.15-net-airoha-fix-CONFIG_DEBUG_FS-check.patch [deleted file]
target/linux/airoha/patches-6.6/066-01-v6.15-net-airoha-Fix-qid-report-in-airoha_tc_get_htb_get_l.patch [deleted file]
target/linux/airoha/patches-6.6/066-02-v6.15-net-airoha-Fix-ETS-priomap-validation.patch [deleted file]
target/linux/airoha/patches-6.6/067-v6.15-net-airoha-Validate-egress-gdm-port-in-airoha_ppe_fo.patch [deleted file]
target/linux/airoha/patches-6.6/068-01-v6.16-net-airoha-Add-l2_flows-rhashtable.patch [deleted file]
target/linux/airoha/patches-6.6/068-02-v6.16-net-airoha-Add-L2-hw-acceleration-support.patch [deleted file]
target/linux/airoha/patches-6.6/069-v6.16-net-airoha-Add-matchall-filter-offload-support.patch [deleted file]
target/linux/airoha/patches-6.6/070-01-v6.16-net-airoha-Introduce-airoha_irq_bank-struct.patch [deleted file]
target/linux/airoha/patches-6.6/070-02-v6.16-net-airoha-Enable-multiple-IRQ-lines-support-in-airo.patch [deleted file]
target/linux/airoha/patches-6.6/071-v6.15-net-airoha-Add-missing-field-to-ppe_mbox_data-struct.patch [deleted file]
target/linux/airoha/patches-6.6/072-v6.15-net-airoha-Fix-page-recycling-in-airoha_qdma_rx_proc.patch [deleted file]
target/linux/airoha/patches-6.6/073-01-v6.16-net-airoha-npu-Move-memory-allocation-in-airoha_npu_.patch [deleted file]
target/linux/airoha/patches-6.6/073-02-v6.16-net-airoha-Add-FLOW_CLS_STATS-callback-support.patch [deleted file]
target/linux/airoha/patches-6.6/073-03-v6.16-net-airoha-ppe-Disable-packet-keepalive.patch [deleted file]
target/linux/airoha/patches-6.6/074-01-v6.16-net-airoha-Do-not-store-hfwd-references-in-airoha_qd.patch [deleted file]
target/linux/airoha/patches-6.6/074-02-v6.16-net-airoha-Add-the-capability-to-allocate-hwfd-buffe.patch [deleted file]
target/linux/airoha/patches-6.6/074-03-v6.16-net-airoha-Add-the-capability-to-allocate-hfwd-descr.patch [deleted file]
target/linux/airoha/patches-6.6/075-v6.16-net-airoha-Fix-an-error-handling-path-in-airoha_allo.patch [deleted file]
target/linux/airoha/patches-6.6/076-01-v6.16-net-airoha-Initialize-PPE-UPDMEM-source-mac-table.patch [deleted file]
target/linux/airoha/patches-6.6/076-02-v6.16-net-airoha-Fix-IPv6-hw-acceleration-in-bridge-mode.patch [deleted file]
target/linux/airoha/patches-6.6/076-03-v6.16-net-airoha-Fix-smac_id-configuration-in-bridge-mode.patch [deleted file]
target/linux/airoha/patches-6.6/077-v6.17-net-airoha-Add-PPPoE-offload-support.patch [deleted file]
target/linux/airoha/patches-6.6/078-v6.16-net-airoha-Enable-RX-queues-16-31.patch [deleted file]
target/linux/airoha/patches-6.6/079-v6.16-net-airoha-Always-check-return-value-from-airoha_ppe.patch [deleted file]
target/linux/airoha/patches-6.6/080-01-v6.16-net-airoha-Compute-number-of-descriptors-according-t.patch [deleted file]
target/linux/airoha/patches-6.6/080-02-v6.16-net-airoha-Differentiate-hwfd-buffer-size-for-QDMA0-.patch [deleted file]
target/linux/airoha/patches-6.6/081-v6.17-net-airoha-Fix-PPE-table-access-in-airoha_ppe_debugf.patch [deleted file]
target/linux/airoha/patches-6.6/082-v6.17-net-airoha-ppe-Do-not-invalid-PPE-entries-in-case-of.patch [deleted file]
target/linux/airoha/patches-6.6/083-01-v6.13-resource-Add-resource-set-range-and-size-helpers.patch [deleted file]
target/linux/airoha/patches-6.6/083-02-v6.16-of-reserved_mem-Add-functions-to-parse-memory-region.patch [deleted file]
target/linux/airoha/patches-6.6/084-01-v6.18-net-airoha-npu-Add-NPU-wlan-memory-initialization-co.patch [deleted file]
target/linux/airoha/patches-6.6/084-02-v6.18-net-airoha-npu-Add-wlan_-send-get-_msg-NPU-callbacks.patch [deleted file]
target/linux/airoha/patches-6.6/084-03-v6.18-net-airoha-npu-Add-wlan-irq-management-callbacks.patch [deleted file]
target/linux/airoha/patches-6.6/084-04-v6.18-net-airoha-npu-Read-NPU-wlan-interrupt-lines-from-th.patch [deleted file]
target/linux/airoha/patches-6.6/084-05-v6.18-net-airoha-npu-Enable-core-3-for-WiFi-offloading.patch [deleted file]
target/linux/airoha/patches-6.6/084-06-v6.18-net-airoha-Add-airoha_offload.h-header.patch [deleted file]
target/linux/airoha/patches-6.6/085-v6.18-net-airoha-Add-wlan-flowtable-TX-offload.patch [deleted file]
target/linux/airoha/patches-6.6/086-01-v6.18-net-airoha-Rely-on-airoha_eth-struct-in-airoha_ppe_f.patch [deleted file]
target/linux/airoha/patches-6.6/086-02-v6.18-net-airoha-Add-airoha_ppe_dev-struct-definition.patch [deleted file]
target/linux/airoha/patches-6.6/086-03-v6.18-net-airoha-Introduce-check_skb-callback-in-ppe_dev-o.patch [deleted file]
target/linux/airoha/patches-6.6/087-v6.17-pinctrl-airoha-Fix-return-value-in-pinconf-callbacks.patch [deleted file]
target/linux/airoha/patches-6.6/089-v6.14-net-airoha-Fix-channel-configuration-for-ETS-Qdisc.patch [deleted file]
target/linux/airoha/patches-6.6/091-01-v6.18-pinctrl-airoha-fix-wrong-PHY-LED-mux-value-for-LED1-.patch [deleted file]
target/linux/airoha/patches-6.6/091-02-v6.18-pinctrl-airoha-fix-wrong-MDIO-function-bitmaks.patch [deleted file]
target/linux/airoha/patches-6.6/104-i2c-mt7621-optional-reset.patch [deleted file]
target/linux/airoha/patches-6.6/105-uart-add-en7523-support.patch [deleted file]
target/linux/airoha/patches-6.6/108-pwm-airoha-Add-support-for-EN7581-SoC.patch [deleted file]
target/linux/airoha/patches-6.6/200-spinlock-extend-guard-with-spinlock_bh-variants.patch [deleted file]
target/linux/airoha/patches-6.6/201-crypto-Add-Mediatek-EIP-93-crypto-engine-support.patch [deleted file]
target/linux/airoha/patches-6.6/300-spi-Add-support-for-the-Airoha-EN7523-SoC-SPI-contro.patch [deleted file]
target/linux/airoha/patches-6.6/900-airoha-bmt-support.patch [deleted file]
target/linux/airoha/patches-6.6/901-snand-mtk-bmt-support.patch [deleted file]

diff --git a/target/linux/airoha/Makefile b/target/linux/airoha/Makefile
index 698dba3d4ff89a839f58041c0e6f17382a1dcfdc..7e64485ae19078732910d957a90eb254fec95de8 100644 (file)
@@ -6,8 +6,7 @@ BOARDNAME:=Airoha ARM
 SUBTARGETS:=en7523 an7581
 FEATURES:=dt squashfs nand ramdisk gpio
 
-KERNEL_PATCHVER:=6.6
-KERNEL_TESTING_PATCHVER:=6.12
+KERNEL_PATCHVER:=6.12
 
 include $(INCLUDE_DIR)/target.mk
 
diff --git a/target/linux/airoha/an7581/config-6.6 b/target/linux/airoha/an7581/config-6.6
deleted file mode 100644 (file)
index a079241..0000000
+++ /dev/null
@@ -1,371 +0,0 @@
-CONFIG_64BIT=y
-CONFIG_AIROHA_CPU_PM_DOMAIN=y
-CONFIG_AIROHA_THERMAL=y
-CONFIG_AIROHA_WATCHDOG=y
-CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y
-CONFIG_ARCH_AIROHA=y
-CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
-CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
-CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_ARCH_FORCE_MAX_ORDER=10
-CONFIG_ARCH_KEEP_MEMBLOCK=y
-CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
-CONFIG_ARCH_MMAP_RND_BITS=18
-CONFIG_ARCH_MMAP_RND_BITS_MAX=24
-CONFIG_ARCH_MMAP_RND_BITS_MIN=18
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_STACKWALK=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_WANTS_NO_INSTR=y
-CONFIG_ARCH_WANTS_THP_SWAP=y
-CONFIG_ARM64=y
-CONFIG_ARM64_4K_PAGES=y
-CONFIG_ARM64_ERRATUM_843419=y
-CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
-CONFIG_ARM64_PAGE_SHIFT=12
-CONFIG_ARM64_PA_BITS=48
-CONFIG_ARM64_PA_BITS_48=y
-CONFIG_ARM64_TAGGED_ADDR_ABI=y
-CONFIG_ARM64_VA_BITS=39
-CONFIG_ARM64_VA_BITS_39=y
-CONFIG_ARM_AIROHA_SOC_CPUFREQ=y
-CONFIG_ARM_AMBA=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_GIC_V2M=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-CONFIG_ARM_GIC_V3_ITS_PCI=y
-CONFIG_ARM_PMU=y
-CONFIG_ARM_PMUV3=y
-CONFIG_ARM_PSCI_FW=y
-CONFIG_ARM_SMCCC_SOC_ID=y
-# CONFIG_ARM_SMMU is not set
-# CONFIG_ARM_SMMU_V3 is not set
-CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_PM=y
-CONFIG_BUFFER_HEAD=y
-CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
-CONFIG_CC_HAVE_SHADOW_CALL_STACK=y
-CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_COMMON_CLK=y
-CONFIG_COMMON_CLK_EN7523=y
-CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
-# CONFIG_COMPAT_32BIT_TIME is not set
-CONFIG_CONTEXT_TRACKING=y
-CONFIG_CONTEXT_TRACKING_IDLE=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_RMAP=y
-CONFIG_CRC16=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_DEV_EIP93=y
-CONFIG_CRYPTO_DRBG=y
-CONFIG_CRYPTO_DRBG_HMAC=y
-CONFIG_CRYPTO_DRBG_MENU=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_JITTERENTROPY=y
-CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
-CONFIG_CRYPTO_LIB_GF128MUL=y
-CONFIG_CRYPTO_LIB_SHA1=y
-CONFIG_CRYPTO_LIB_SHA256=y
-CONFIG_CRYPTO_LIB_UTILS=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA3=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_ZSTD=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DEBUG_MISC=y
-CONFIG_DMADEVICES=y
-CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
-CONFIG_DMA_DIRECT_REMAP=y
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
-CONFIG_DTC=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EXT4_FS=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FRAME_POINTER=y
-CONFIG_FS_IOMAP=y
-CONFIG_FS_MBCACHE=y
-CONFIG_FUNCTION_ALIGNMENT=4
-CONFIG_FUNCTION_ALIGNMENT_4B=y
-CONFIG_FWNODE_MDIO=y
-CONFIG_FW_CACHE=y
-# CONFIG_FW_LOADER_USER_HELPER is not set
-CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_ARCH_TOPOLOGY=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_GENERIC_CSUM=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_GETTIMEOFDAY=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IOREMAP=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
-CONFIG_GENERIC_PINCTRL_GROUPS=y
-CONFIG_GENERIC_PINMUX_FUNCTIONS=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GLOB=y
-CONFIG_GPIOLIB_IRQCHIP=y
-CONFIG_GPIO_CDEV=y
-CONFIG_GPIO_EN7523=y
-CONFIG_GPIO_GENERIC=y
-CONFIG_GRO_CELLS=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_AIROHA=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_ESP_OFFLOAD is not set
-CONFIG_INET_IPCOMP=y
-CONFIG_INET_TUNNEL=y
-CONFIG_INET_XFRM_TUNNEL=y
-CONFIG_IO_URING=y
-CONFIG_IPC_NS=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-# CONFIG_IPV6_SUBTREES is not set
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_COMMON=y
-# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_IP_ROUTE_MULTIPATH is not set
-# CONFIG_IP_ROUTE_VERBOSE is not set
-CONFIG_IRQCHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_WORK=y
-CONFIG_JBD2=y
-CONFIG_LIBFDT=y
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MDIO_BUS=y
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_DEVRES=y
-# CONFIG_MEDIATEK_GE_SOC_PHY is not set
-# CONFIG_MEMCG is not set
-CONFIG_MFD_SYSCON=y
-CONFIG_MIGRATION=y
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_CQHCI=y
-CONFIG_MMC_MTK=y
-CONFIG_MMU_LAZY_TLB_REFCOUNT=y
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_MTD_NAND_CORE=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_MTK_BMT=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_SPI_NAND=y
-CONFIG_MTD_SPLIT_FIRMWARE=y
-CONFIG_MTD_SPLIT_FIT_FW=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_BLOCK=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_NET_AIROHA=y
-CONFIG_NET_DEVLINK=y
-CONFIG_NET_DSA=y
-CONFIG_NET_DSA_MT7530=y
-CONFIG_NET_DSA_MT7530_MDIO=y
-CONFIG_NET_DSA_MT7530_MMIO=y
-CONFIG_NET_DSA_TAG_MTK=y
-CONFIG_NET_FLOW_LIMIT=y
-# CONFIG_NET_MEDIATEK_SOC is not set
-CONFIG_NET_SELFTESTS=y
-# CONFIG_NET_VENDOR_3COM is not set
-CONFIG_NET_VENDOR_AIROHA=y
-# CONFIG_NET_VENDOR_MEDIATEK is not set
-CONFIG_NLS=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=4
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_KOBJ=y
-CONFIG_OF_MDIO=y
-CONFIG_PAGE_POOL=y
-CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
-CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
-CONFIG_PARTITION_PERCPU=y
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEFAULT is not set
-CONFIG_PCIEASPM_PERFORMANCE=y
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_MEDIATEK=y
-CONFIG_PCIE_MEDIATEK_GEN3=y
-CONFIG_PCIE_PME=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_MSI=y
-CONFIG_PCS_MTK_LYNXI=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PER_VMA_LOCK=y
-CONFIG_PGTABLE_LEVELS=3
-CONFIG_PHYLIB=y
-CONFIG_PHYLIB_LEDS=y
-CONFIG_PHYLINK=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_PHY_AIROHA_PCIE=y
-CONFIG_PINCTRL=y
-CONFIG_PINCTRL_AIROHA=y
-# CONFIG_PINCTRL_MT2712 is not set
-# CONFIG_PINCTRL_MT6765 is not set
-# CONFIG_PINCTRL_MT6795 is not set
-# CONFIG_PINCTRL_MT6797 is not set
-# CONFIG_PINCTRL_MT7622 is not set
-# CONFIG_PINCTRL_MT7981 is not set
-# CONFIG_PINCTRL_MT7986 is not set
-# CONFIG_PINCTRL_MT8173 is not set
-# CONFIG_PINCTRL_MT8183 is not set
-# CONFIG_PINCTRL_MT8186 is not set
-# CONFIG_PINCTRL_MT8188 is not set
-# CONFIG_PINCTRL_MT8516 is not set
-CONFIG_PM=y
-CONFIG_PM_CLK=y
-CONFIG_PM_OPP=y
-CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_SUPPLY=y
-CONFIG_PREEMPT_NONE_BUILD=y
-CONFIG_PTP_1588_CLOCK_OPTIONAL=y
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-CONFIG_RANDSTRUCT_NONE=y
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_RELOCATABLE=y
-CONFIG_RESET_CONTROLLER=y
-CONFIG_RFS_ACCEL=y
-CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_RPS=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_SERIAL_8250_AIROHA=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_FSL=y
-CONFIG_SERIAL_8250_NR_UARTS=5
-CONFIG_SERIAL_8250_RUNTIME_UARTS=5
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_MCTRL_GPIO=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SGL_ALLOC=y
-CONFIG_SKB_EXTENSIONS=y
-CONFIG_SMP=y
-CONFIG_SOCK_RX_QUEUE_MAPPING=y
-CONFIG_SOC_BUS=y
-CONFIG_SOFTIRQ_ON_OWN_STACK=y
-CONFIG_SPARSEMEM=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_SPI=y
-# CONFIG_SPI_AIROHA_EN7523 is not set
-CONFIG_SPI_AIROHA_SNFI=y
-CONFIG_SPI_MASTER=y
-CONFIG_SPI_MEM=y
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SWIOTLB=y
-CONFIG_SWPHY=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_THERMAL=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_OF=y
-CONFIG_THREAD_INFO_IN_TASK=y
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
-CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
-CONFIG_TREE_RCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_UBIFS_FS=y
-# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
-CONFIG_VMAP_STACK=y
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WLAN is not set
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_XFRM_AH=y
-CONFIG_XFRM_ALGO=y
-CONFIG_XFRM_ESP=y
-CONFIG_XFRM_IPCOMP=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_XPS=y
-CONFIG_XXHASH=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZONE_DMA32=y
-CONFIG_ZSTD_COMMON=y
-CONFIG_ZSTD_COMPRESS=y
-CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/airoha/en7523/config-6.6 b/target/linux/airoha/en7523/config-6.6
deleted file mode 100644 (file)
index a40b616..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-CONFIG_ALIGNMENT_TRAP=y
-CONFIG_ARCH_32BIT_OFF_T=y
-CONFIG_ARCH_AIROHA=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_KEEP_MEMBLOCK=y
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-CONFIG_ARCH_MULTIPLATFORM=y
-CONFIG_ARCH_MULTI_V6_V7=y
-CONFIG_ARCH_MULTI_V7=y
-CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
-CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_STACKWALK=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARM=y
-CONFIG_ARM_AMBA=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_CPU_SUSPEND=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-CONFIG_ARM_GIC_V3_ITS_PCI=y
-CONFIG_ARM_HAS_GROUP_RELOCS=y
-CONFIG_ARM_HEAVY_MB=y
-# CONFIG_ARM_HIGHBANK_CPUIDLE is not set
-CONFIG_ARM_L1_CACHE_SHIFT=6
-CONFIG_ARM_L1_CACHE_SHIFT_6=y
-CONFIG_ARM_PATCH_IDIV=y
-CONFIG_ARM_PATCH_PHYS_VIRT=y
-CONFIG_ARM_PSCI=y
-CONFIG_ARM_PSCI_FW=y
-# CONFIG_ARM_SMMU is not set
-CONFIG_ARM_THUMB=y
-CONFIG_ARM_UNWIND=y
-CONFIG_ARM_VIRT_EXT=y
-CONFIG_ATAGS=y
-CONFIG_AUTO_ZRELADDR=y
-CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_PM=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_CACHE_L2X0=y
-CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
-CONFIG_CMDLINE_FROM_BOOTLOADER=y
-CONFIG_COMMON_CLK=y
-CONFIG_COMMON_CLK_EN7523=y
-CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
-CONFIG_COMPAT_32BIT_TIME=y
-CONFIG_CONTEXT_TRACKING=y
-CONFIG_CONTEXT_TRACKING_IDLE=y
-CONFIG_CPU_32v6K=y
-CONFIG_CPU_32v7=y
-CONFIG_CPU_ABRT_EV7=y
-CONFIG_CPU_CACHE_V7=y
-CONFIG_CPU_CACHE_VIPT=y
-CONFIG_CPU_COPY_V6=y
-CONFIG_CPU_CP15=y
-CONFIG_CPU_CP15_MMU=y
-CONFIG_CPU_HAS_ASID=y
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MITIGATIONS=y
-CONFIG_CPU_PABRT_V7=y
-CONFIG_CPU_PM=y
-CONFIG_CPU_RMAP=y
-CONFIG_CPU_SPECTRE=y
-CONFIG_CPU_THUMB_CAPABLE=y
-CONFIG_CPU_TLB_V7=y
-CONFIG_CPU_V7=y
-CONFIG_CRC16=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
-CONFIG_CRYPTO_LIB_GF128MUL=y
-CONFIG_CRYPTO_LIB_SHA1=y
-CONFIG_CRYPTO_LIB_UTILS=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_ZSTD=y
-CONFIG_CURRENT_POINTER_IN_TPIDRURO=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
-CONFIG_DEBUG_MISC=y
-CONFIG_DMA_OPS=y
-CONFIG_DTC=y
-CONFIG_EDAC_ATOMIC_SCRUB=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EXCLUSIVE_SYSTEM_RAM=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FS_IOMAP=y
-CONFIG_FUNCTION_ALIGNMENT=0
-CONFIG_FWNODE_MDIO=y
-CONFIG_FW_LOADER_PAGED_BUF=y
-CONFIG_FW_LOADER_SYSFS=y
-CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_ARCH_TOPOLOGY=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_GETTIMEOFDAY=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
-CONFIG_GENERIC_PINCTRL_GROUPS=y
-CONFIG_GENERIC_PINMUX_FUNCTIONS=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GENERIC_VDSO_32=y
-CONFIG_GPIOLIB_IRQCHIP=y
-CONFIG_GPIO_CDEV=y
-CONFIG_GPIO_EN7523=y
-CONFIG_GPIO_GENERIC=y
-# CONFIG_HARDEN_BRANCH_HISTORY is not set
-# CONFIG_HARDEN_BRANCH_PREDICTOR is not set
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAVE_SMP=y
-CONFIG_HOTPLUG_CORE_SYNC=y
-CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_HW_RANDOM=y
-CONFIG_HZ_FIXED=0
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_IOMMUFD is not set
-# CONFIG_IOMMU_DEBUGFS is not set
-# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
-# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
-CONFIG_IOMMU_SUPPORT=y
-CONFIG_IRQCHIP=y
-CONFIG_IRQSTACKS=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_IRQ_WORK=y
-# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
-CONFIG_LIBFDT=y
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MDIO_BUS=y
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_DEVRES=y
-CONFIG_MFD_SYSCON=y
-CONFIG_MIGHT_HAVE_CACHE_L2X0=y
-CONFIG_MIGRATION=y
-CONFIG_MMU_LAZY_TLB_REFCOUNT=y
-CONFIG_MODULES_USE_ELF_REL=y
-CONFIG_MTD_NAND_CORE=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING=y
-CONFIG_MTD_SPI_NAND=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_SPLIT_FIRMWARE=y
-CONFIG_MTD_SPLIT_FIT_FW=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_BLOCK=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_SRCU_NMI_SAFE=y
-CONFIG_NET_EGRESS=y
-CONFIG_NET_FLOW_LIMIT=y
-CONFIG_NET_INGRESS=y
-CONFIG_NET_SELFTESTS=y
-CONFIG_NET_XGRESS=y
-CONFIG_NLS=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=2
-CONFIG_NVMEM=y
-CONFIG_NVMEM_LAYOUTS=y
-CONFIG_NVMEM_SYSFS=y
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_KOBJ=y
-CONFIG_OF_MDIO=y
-CONFIG_OLD_SIGACTION=y
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_OUTER_CACHE=y
-CONFIG_OUTER_CACHE_SYNC=y
-CONFIG_PADATA=y
-CONFIG_PAGE_OFFSET=0xC0000000
-CONFIG_PAGE_POOL=y
-CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
-CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
-CONFIG_PARTITION_PERCPU=y
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_MEDIATEK=y
-CONFIG_PCIE_PME=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_MSI=y
-CONFIG_PERF_USE_VMALLOC=y
-CONFIG_PGTABLE_LEVELS=2
-CONFIG_PHYLIB=y
-CONFIG_PHYLIB_LEDS=y
-CONFIG_PINCTRL=y
-CONFIG_PM=y
-CONFIG_PM_CLK=y
-CONFIG_PREEMPT_NONE_BUILD=y
-CONFIG_PTP_1588_CLOCK_OPTIONAL=y
-CONFIG_PWM=y
-CONFIG_PWM_SYSFS=y
-CONFIG_RANDSTRUCT_NONE=y
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_RESET_CONTROLLER=y
-CONFIG_RFS_ACCEL=y
-CONFIG_RPS=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_SCSI=y
-CONFIG_SCSI_COMMON=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_FSL=y
-# CONFIG_SERIAL_8250_SHARE_IRQ is not set
-CONFIG_SERIAL_MCTRL_GPIO=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SGL_ALLOC=y
-CONFIG_SG_POOL=y
-CONFIG_SMP=y
-CONFIG_SMP_ON_UP=y
-CONFIG_SOCK_RX_QUEUE_MAPPING=y
-CONFIG_SOFTIRQ_ON_OWN_STACK=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_SPI=y
-CONFIG_SPI_AIROHA_EN7523=y
-CONFIG_SPI_MASTER=y
-CONFIG_SPI_MEM=y
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_STACKTRACE=y
-# CONFIG_SWAP is not set
-CONFIG_SWPHY=y
-CONFIG_SWP_EMULATE=y
-CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-CONFIG_THREAD_INFO_IN_TASK=y
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
-CONFIG_TREE_RCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_UBIFS_FS=y
-CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
-CONFIG_UNWINDER_ARM=y
-CONFIG_USB=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_XHCI_HCD=y
-# CONFIG_USB_XHCI_PLATFORM is not set
-CONFIG_USE_OF=y
-# CONFIG_VFP is not set
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_XPS=y
-CONFIG_XXHASH=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_BCJ=y
-CONFIG_ZBOOT_ROM_BSS=0
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZSTD_COMMON=y
-CONFIG_ZSTD_COMPRESS=y
-CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch b/target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch
deleted file mode 100644 (file)
index a77ed8c..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-From 428ae88ef519f2009fac37563de76ffa6f93046f Mon Sep 17 00:00:00 2001
-From: Daniel Danzberger <dd@embedd.com>
-Date: Sat, 9 Mar 2024 10:32:16 +0100
-Subject: [PATCH] arm64: add Airoha EN7581 platform
-
-Introduce the Kconfig entry for the Airoha EN7581 multicore architecture
-available in the Airoha EN7581 evaluation board.
-
-Signed-off-by: Daniel Danzberger <dd@embedd.com>
-Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/d52d95db313e6a58ba997ba2181faf78a1014bcc.1709975956.git.lorenzo@kernel.org
-Signed-off-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
----
- arch/arm64/Kconfig.platforms | 7 +++++++
- 1 file changed, 7 insertions(+)
-
---- a/arch/arm64/Kconfig.platforms
-+++ b/arch/arm64/Kconfig.platforms
-@@ -8,6 +8,13 @@ config ARCH_ACTIONS
-       help
-         This enables support for the Actions Semiconductor S900 SoC family.
-+config ARCH_AIROHA
-+      bool "Airoha SoC Support"
-+      select ARM_PSCI
-+      select HAVE_ARM_ARCH_TIMER
-+      help
-+        This enables support for the ARM64 based Airoha SoCs.
-+
- config ARCH_SUNXI
-       bool "Allwinner sunxi 64-bit SoC Family"
-       select ARCH_HAS_RESET_CONTROLLER
diff --git a/target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch b/target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch
deleted file mode 100644 (file)
index 46c376e..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-From fd6acb0d21b8683fd8804129beeb4fe629488aff Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 9 Jul 2024 00:42:38 +0200
-Subject: [PATCH] i2c: mt7621: Add Airoha EN7581 i2c support
-
-Introduce i2c support to Airoha EN7581 SoC through the i2c-mt7621
-driver.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Tested-by: Ray Liu <ray.liu@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
----
- drivers/i2c/busses/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/i2c/busses/Kconfig
-+++ b/drivers/i2c/busses/Kconfig
-@@ -841,7 +841,7 @@ config I2C_MT65XX
- config I2C_MT7621
-       tristate "MT7621/MT7628 I2C Controller"
--      depends on (RALINK && (SOC_MT7620 || SOC_MT7621)) || COMPILE_TEST
-+      depends on (RALINK && (SOC_MT7620 || SOC_MT7621)) || ARCH_AIROHA || COMPILE_TEST
-       help
-         Say Y here to include support for I2C controller in the
-         MediaTek MT7621/MT7628 SoCs.
diff --git a/target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch
deleted file mode 100644 (file)
index 253b6fd..0000000
+++ /dev/null
@@ -1,2835 +0,0 @@
-From 23020f04932701d5c8363e60756f12b43b8ed752 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 12 Jul 2024 23:27:58 +0200
-Subject: [PATCH] net: airoha: Introduce ethernet support for EN7581 SoC
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Add airoha_eth driver in order to introduce ethernet support for
-Airoha EN7581 SoC available on EN7581 development board (en7581-evb).
-EN7581 mac controller is mainly composed by the Frame Engine (PSE+PPE)
-and QoS-DMA (QDMA) modules. FE is used for traffic offloading (just
-basic functionalities are currently supported) while QDMA is used for
-DMA operations and QOS functionalities between the mac layer and the
-external modules conncted to the FE GDM ports (e.g MT7530 DSA switch
-or external phys).
-A general overview of airoha_eth architecture is reported below:
-
-               ┌───────┐                                     ┌───────┐
-               │ QDMA2 │                                     │ QDMA1 │
-               └───┬───┘                                     └───┬───┘
-                   │                                             │
-           ┌───────▼─────────────────────────────────────────────▼────────┐
-           │                                                              │
-           │       P5                                            P0       │
-           │                                                              │
-           │                                                              │
-           │                                                              │    ┌──────┐
-           │                                                           P3 ├────► GDM3 │
-           │                                                              │    └──────┘
-           │                                                              │
-           │                                                              │
-┌─────┐    │                                                              │
-│ PPE ◄────┤ P4                          PSE                              │
-└─────┘    │                                                              │
-           │                                                              │
-           │                                                              │
-           │                                                              │    ┌──────┐
-           │                                                           P9 ├────► GDM4 │
-           │                                                              │    └──────┘
-           │                                                              │
-           │                                                              │
-           │                                                              │
-           │        P2                                           P1       │
-           └─────────┬───────────────────────────────────────────┬────────┘
-                     │                                           │
-                 ┌───▼──┐                                     ┌──▼───┐
-                 │ GDM2 │                                     │ GDM1 │
-                 └──────┘                                     └──┬───┘
-                                                                 │
-                                                            ┌────▼─────┐
-                                                            │  MT7530  │
-                                                            └──────────┘
-
-Currently only hw LAN features (QDMA1+GDM1) are available while hw WAN
-(QDMA2+GDM{2,3,4}) ones will be added with subsequent patches introducing
-traffic offloading support.
-
-Tested-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/274945d2391c195098ab180a46d0617b18b9e42c.1720818878.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- MAINTAINERS                                |    9 +
- drivers/net/ethernet/mediatek/Kconfig      |   10 +-
- drivers/net/ethernet/mediatek/Makefile     |    1 +
- drivers/net/ethernet/mediatek/airoha_eth.c | 2730 ++++++++++++++++++++
- 4 files changed, 2749 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/mediatek/airoha_eth.c
-
---- a/drivers/net/ethernet/mediatek/Kconfig
-+++ b/drivers/net/ethernet/mediatek/Kconfig
-@@ -1,12 +1,20 @@
- # SPDX-License-Identifier: GPL-2.0-only
- config NET_VENDOR_MEDIATEK
-       bool "MediaTek devices"
--      depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
-+      depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
-       help
-         If you have a Mediatek SoC with ethernet, say Y.
- if NET_VENDOR_MEDIATEK
-+config NET_AIROHA
-+      tristate "Airoha SoC Gigabit Ethernet support"
-+      depends on NET_DSA || !NET_DSA
-+      select PAGE_POOL
-+      help
-+        This driver supports the gigabit ethernet MACs in the
-+        Airoha SoC family.
-+
- config NET_MEDIATEK_SOC_WED
-       depends on ARCH_MEDIATEK || COMPILE_TEST
-       def_bool NET_MEDIATEK_SOC != n
---- a/drivers/net/ethernet/mediatek/Makefile
-+++ b/drivers/net/ethernet/mediatek/Makefile
-@@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) +
- endif
- obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
- obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-+obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
---- /dev/null
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -0,0 +1,2731 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+#include <linux/etherdevice.h>
-+#include <linux/iopoll.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/of.h>
-+#include <linux/of_net.h>
-+#include <linux/platform_device.h>
-+#include <linux/reset.h>
-+#include <linux/tcp.h>
-+#include <linux/u64_stats_sync.h>
-+#include <net/dsa.h>
-+#include <net/page_pool/helpers.h>
-+#include <uapi/linux/ppp_defs.h>
-+
-+#define AIROHA_MAX_NUM_GDM_PORTS      1
-+#define AIROHA_MAX_NUM_RSTS           3
-+#define AIROHA_MAX_NUM_XSI_RSTS               5
-+#define AIROHA_MAX_MTU                        2000
-+#define AIROHA_MAX_PACKET_SIZE                2048
-+#define AIROHA_NUM_TX_RING            32
-+#define AIROHA_NUM_RX_RING            32
-+#define AIROHA_FE_MC_MAX_VLAN_TABLE   64
-+#define AIROHA_FE_MC_MAX_VLAN_PORT    16
-+#define AIROHA_NUM_TX_IRQ             2
-+#define HW_DSCP_NUM                   2048
-+#define IRQ_QUEUE_LEN(_n)             ((_n) ? 1024 : 2048)
-+#define TX_DSCP_NUM                   1024
-+#define RX_DSCP_NUM(_n)                       \
-+      ((_n) ==  2 ? 128 :             \
-+       (_n) == 11 ? 128 :             \
-+       (_n) == 15 ? 128 :             \
-+       (_n) ==  0 ? 1024 : 16)
-+
-+#define PSE_RSV_PAGES                 128
-+#define PSE_QUEUE_RSV_PAGES           64
-+
-+/* FE */
-+#define PSE_BASE                      0x0100
-+#define CSR_IFC_BASE                  0x0200
-+#define CDM1_BASE                     0x0400
-+#define GDM1_BASE                     0x0500
-+#define PPE1_BASE                     0x0c00
-+
-+#define CDM2_BASE                     0x1400
-+#define GDM2_BASE                     0x1500
-+
-+#define GDM3_BASE                     0x1100
-+#define GDM4_BASE                     0x2500
-+
-+#define GDM_BASE(_n)                  \
-+      ((_n) == 4 ? GDM4_BASE :        \
-+       (_n) == 3 ? GDM3_BASE :        \
-+       (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-+
-+#define REG_FE_DMA_GLO_CFG            0x0000
-+#define FE_DMA_GLO_L2_SPACE_MASK      GENMASK(7, 4)
-+#define FE_DMA_GLO_PG_SZ_MASK         BIT(3)
-+
-+#define REG_FE_RST_GLO_CFG            0x0004
-+#define FE_RST_GDM4_MBI_ARB_MASK      BIT(3)
-+#define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
-+#define FE_RST_CORE_MASK              BIT(0)
-+
-+#define REG_FE_LAN_MAC_H              0x0040
-+#define REG_FE_LAN_MAC_LMIN           0x0044
-+#define REG_FE_LAN_MAC_LMAX           0x0048
-+
-+#define REG_FE_CDM1_OQ_MAP0           0x0050
-+#define REG_FE_CDM1_OQ_MAP1           0x0054
-+#define REG_FE_CDM1_OQ_MAP2           0x0058
-+#define REG_FE_CDM1_OQ_MAP3           0x005c
-+
-+#define REG_FE_PCE_CFG                        0x0070
-+#define PCE_DPI_EN_MASK                       BIT(2)
-+#define PCE_KA_EN_MASK                        BIT(1)
-+#define PCE_MC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_WR               0x0080
-+#define PSE_CFG_PORT_ID_MASK          GENMASK(27, 24)
-+#define PSE_CFG_QUEUE_ID_MASK         GENMASK(20, 16)
-+#define PSE_CFG_WR_EN_MASK            BIT(8)
-+#define PSE_CFG_OQRSV_SEL_MASK                BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_VAL      0x0084
-+#define PSE_CFG_OQ_RSV_MASK           GENMASK(13, 0)
-+
-+#define PSE_FQ_CFG                    0x008c
-+#define PSE_FQ_LIMIT_MASK             GENMASK(14, 0)
-+
-+#define REG_FE_PSE_BUF_SET            0x0090
-+#define PSE_SHARE_USED_LTHD_MASK      GENMASK(31, 16)
-+#define PSE_ALLRSV_MASK                       GENMASK(14, 0)
-+
-+#define REG_PSE_SHARE_USED_THD                0x0094
-+#define PSE_SHARE_USED_MTHD_MASK      GENMASK(31, 16)
-+#define PSE_SHARE_USED_HTHD_MASK      GENMASK(15, 0)
-+
-+#define REG_GDM_MISC_CFG              0x0148
-+#define GDM2_RDM_ACK_WAIT_PREF_MASK   BIT(9)
-+#define GDM2_CHN_VLD_MODE_MASK                BIT(5)
-+
-+#define REG_FE_CSR_IFC_CFG            CSR_IFC_BASE
-+#define FE_IFC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_VIP_PORT_EN            0x01f0
-+#define REG_FE_IFC_PORT_EN            0x01f4
-+
-+#define REG_PSE_IQ_REV1                       (PSE_BASE + 0x08)
-+#define PSE_IQ_RES1_P2_MASK           GENMASK(23, 16)
-+
-+#define REG_PSE_IQ_REV2                       (PSE_BASE + 0x0c)
-+#define PSE_IQ_RES2_P5_MASK           GENMASK(15, 8)
-+#define PSE_IQ_RES2_P4_MASK           GENMASK(7, 0)
-+
-+#define REG_FE_VIP_EN(_n)             (0x0300 + ((_n) << 3))
-+#define PATN_FCPU_EN_MASK             BIT(7)
-+#define PATN_SWP_EN_MASK              BIT(6)
-+#define PATN_DP_EN_MASK                       BIT(5)
-+#define PATN_SP_EN_MASK                       BIT(4)
-+#define PATN_TYPE_MASK                        GENMASK(3, 1)
-+#define PATN_EN_MASK                  BIT(0)
-+
-+#define REG_FE_VIP_PATN(_n)           (0x0304 + ((_n) << 3))
-+#define PATN_DP_MASK                  GENMASK(31, 16)
-+#define PATN_SP_MASK                  GENMASK(15, 0)
-+
-+#define REG_CDM1_VLAN_CTRL            CDM1_BASE
-+#define CDM1_VLAN_MASK                        GENMASK(31, 16)
-+
-+#define REG_CDM1_FWD_CFG              (CDM1_BASE + 0x08)
-+#define CDM1_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM1_CRSN_QSEL(_n)                (CDM1_BASE + 0x10 + ((_n) << 2))
-+#define CDM1_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_CDM2_FWD_CFG              (CDM2_BASE + 0x08)
-+#define CDM2_OAM_QSEL_MASK            GENMASK(31, 27)
-+#define CDM2_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM2_CRSN_QSEL(_n)                (CDM2_BASE + 0x10 + ((_n) << 2))
-+#define CDM2_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_GDM_FWD_CFG(_n)           GDM_BASE(_n)
-+#define GDM_DROP_CRC_ERR              BIT(23)
-+#define GDM_IP4_CKSUM                 BIT(22)
-+#define GDM_TCP_CKSUM                 BIT(21)
-+#define GDM_UDP_CKSUM                 BIT(20)
-+#define GDM_UCFQ_MASK                 GENMASK(15, 12)
-+#define GDM_BCFQ_MASK                 GENMASK(11, 8)
-+#define GDM_MCFQ_MASK                 GENMASK(7, 4)
-+#define GDM_OCFQ_MASK                 GENMASK(3, 0)
-+
-+#define REG_GDM_INGRESS_CFG(_n)               (GDM_BASE(_n) + 0x10)
-+#define GDM_INGRESS_FC_EN_MASK                BIT(1)
-+#define GDM_STAG_EN_MASK              BIT(0)
-+
-+#define REG_GDM_LEN_CFG(_n)           (GDM_BASE(_n) + 0x14)
-+#define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
-+#define GDM_LONG_LEN_MASK             GENMASK(29, 16)
-+
-+#define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
-+#define FE_CPORT_PAD                  BIT(26)
-+#define FE_CPORT_PORT_XFC_MASK                BIT(25)
-+#define FE_CPORT_QUEUE_XFC_MASK               BIT(24)
-+
-+#define REG_FE_GDM_MIB_CLEAR(_n)      (GDM_BASE(_n) + 0xf0)
-+#define FE_GDM_MIB_RX_CLEAR_MASK      BIT(1)
-+#define FE_GDM_MIB_TX_CLEAR_MASK      BIT(0)
-+
-+#define REG_FE_GDM1_MIB_CFG           (GDM1_BASE + 0xf4)
-+#define FE_STRICT_RFC2819_MODE_MASK   BIT(31)
-+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK  BIT(17)
-+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK  BIT(16)
-+#define FE_TX_MIB_ID_MASK             GENMASK(15, 8)
-+#define FE_RX_MIB_ID_MASK             GENMASK(7, 0)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x104)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x10c)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x110)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x114)
-+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x118)
-+#define REG_FE_GDM_TX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x11c)
-+#define REG_FE_GDM_TX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x120)
-+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x124)
-+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x128)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x12c)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x130)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x134)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x138)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x13c)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x140)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x148)
-+#define REG_FE_GDM_RX_FC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x14c)
-+#define REG_FE_GDM_RX_RC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x150)
-+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)   (GDM_BASE(_n) + 0x154)
-+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)      (GDM_BASE(_n) + 0x158)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x15c)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x160)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x164)
-+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x168)
-+#define REG_FE_GDM_RX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x16c)
-+#define REG_FE_GDM_RX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x170)
-+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)     (GDM_BASE(_n) + 0x174)
-+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)                (GDM_BASE(_n) + 0x178)
-+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)      (GDM_BASE(_n) + 0x17c)
-+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x180)
-+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x184)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x188)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x18c)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x190)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x194)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
-+
-+#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
-+#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
-+#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
-+#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
-+#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x28c)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x290)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x294)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x298)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x29c)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2b8)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2bc)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2c0)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2c4)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2c8)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2cc)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2e8)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2ec)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2f0)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2f4)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2f8)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
-+
-+#define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
-+#define MBI_RX_AGE_SEL_MASK           GENMASK(18, 17)
-+#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
-+
-+#define REG_GDM3_FWD_CFG              GDM3_BASE
-+#define GDM3_PAD_EN_MASK              BIT(28)
-+
-+#define REG_GDM4_FWD_CFG              (GDM4_BASE + 0x100)
-+#define GDM4_PAD_EN_MASK              BIT(28)
-+#define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
-+
-+#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x33c)
-+#define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
-+#define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
-+#define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
-+
-+#define REG_IP_FRAG_FP                        0x2010
-+#define IP_ASSEMBLE_PORT_MASK         GENMASK(24, 21)
-+#define IP_ASSEMBLE_NBQ_MASK          GENMASK(20, 16)
-+#define IP_FRAGMENT_PORT_MASK         GENMASK(8, 5)
-+#define IP_FRAGMENT_NBQ_MASK          GENMASK(4, 0)
-+
-+#define REG_MC_VLAN_EN                        0x2100
-+#define MC_VLAN_EN_MASK                       BIT(0)
-+
-+#define REG_MC_VLAN_CFG                       0x2104
-+#define MC_VLAN_CFG_CMD_DONE_MASK     BIT(31)
-+#define MC_VLAN_CFG_TABLE_ID_MASK     GENMASK(21, 16)
-+#define MC_VLAN_CFG_PORT_ID_MASK      GENMASK(11, 8)
-+#define MC_VLAN_CFG_TABLE_SEL_MASK    BIT(4)
-+#define MC_VLAN_CFG_RW_MASK           BIT(0)
-+
-+#define REG_MC_VLAN_DATA              0x2108
-+
-+#define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
-+
-+/* QDMA */
-+#define REG_QDMA_GLOBAL_CFG                   0x0004
-+#define GLOBAL_CFG_RX_2B_OFFSET_MASK          BIT(31)
-+#define GLOBAL_CFG_DMA_PREFERENCE_MASK                GENMASK(30, 29)
-+#define GLOBAL_CFG_CPU_TXR_RR_MASK            BIT(28)
-+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK                BIT(27)
-+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK     BIT(26)
-+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK   BIT(25)
-+#define GLOBAL_CFG_OAM_MODIFY_MASK            BIT(24)
-+#define GLOBAL_CFG_RESET_MASK                 BIT(23)
-+#define GLOBAL_CFG_RESET_DONE_MASK            BIT(22)
-+#define GLOBAL_CFG_MULTICAST_EN_MASK          BIT(21)
-+#define GLOBAL_CFG_IRQ1_EN_MASK                       BIT(20)
-+#define GLOBAL_CFG_IRQ0_EN_MASK                       BIT(19)
-+#define GLOBAL_CFG_LOOPCNT_EN_MASK            BIT(18)
-+#define GLOBAL_CFG_RD_BYPASS_WR_MASK          BIT(17)
-+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK         BIT(16)
-+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK          GENMASK(13, 8)
-+#define GLOBAL_CFG_CHECK_DONE_MASK            BIT(7)
-+#define GLOBAL_CFG_TX_WB_DONE_MASK            BIT(6)
-+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK         GENMASK(5, 4)
-+#define GLOBAL_CFG_RX_DMA_BUSY_MASK           BIT(3)
-+#define GLOBAL_CFG_RX_DMA_EN_MASK             BIT(2)
-+#define GLOBAL_CFG_TX_DMA_BUSY_MASK           BIT(1)
-+#define GLOBAL_CFG_TX_DMA_EN_MASK             BIT(0)
-+
-+#define REG_FWD_DSCP_BASE                     0x0010
-+#define REG_FWD_BUF_BASE                      0x0014
-+
-+#define REG_HW_FWD_DSCP_CFG                   0x0018
-+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK         GENMASK(29, 28)
-+#define HW_FWD_DSCP_SCATTER_LEN_MASK          GENMASK(17, 16)
-+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK      GENMASK(15, 0)
-+
-+#define REG_INT_STATUS(_n)            \
-+      (((_n) == 4) ? 0x0730 :         \
-+       ((_n) == 3) ? 0x0724 :         \
-+       ((_n) == 2) ? 0x0720 :         \
-+       ((_n) == 1) ? 0x0024 : 0x0020)
-+
-+#define REG_INT_ENABLE(_n)            \
-+      (((_n) == 4) ? 0x0750 :         \
-+       ((_n) == 3) ? 0x0744 :         \
-+       ((_n) == 2) ? 0x0740 :         \
-+       ((_n) == 1) ? 0x002c : 0x0028)
-+
-+/* QDMA_CSR_INT_ENABLE1 */
-+#define RX15_COHERENT_INT_MASK                BIT(31)
-+#define RX14_COHERENT_INT_MASK                BIT(30)
-+#define RX13_COHERENT_INT_MASK                BIT(29)
-+#define RX12_COHERENT_INT_MASK                BIT(28)
-+#define RX11_COHERENT_INT_MASK                BIT(27)
-+#define RX10_COHERENT_INT_MASK                BIT(26)
-+#define RX9_COHERENT_INT_MASK         BIT(25)
-+#define RX8_COHERENT_INT_MASK         BIT(24)
-+#define RX7_COHERENT_INT_MASK         BIT(23)
-+#define RX6_COHERENT_INT_MASK         BIT(22)
-+#define RX5_COHERENT_INT_MASK         BIT(21)
-+#define RX4_COHERENT_INT_MASK         BIT(20)
-+#define RX3_COHERENT_INT_MASK         BIT(19)
-+#define RX2_COHERENT_INT_MASK         BIT(18)
-+#define RX1_COHERENT_INT_MASK         BIT(17)
-+#define RX0_COHERENT_INT_MASK         BIT(16)
-+#define TX7_COHERENT_INT_MASK         BIT(15)
-+#define TX6_COHERENT_INT_MASK         BIT(14)
-+#define TX5_COHERENT_INT_MASK         BIT(13)
-+#define TX4_COHERENT_INT_MASK         BIT(12)
-+#define TX3_COHERENT_INT_MASK         BIT(11)
-+#define TX2_COHERENT_INT_MASK         BIT(10)
-+#define TX1_COHERENT_INT_MASK         BIT(9)
-+#define TX0_COHERENT_INT_MASK         BIT(8)
-+#define CNT_OVER_FLOW_INT_MASK                BIT(7)
-+#define IRQ1_FULL_INT_MASK            BIT(5)
-+#define IRQ1_INT_MASK                 BIT(4)
-+#define HWFWD_DSCP_LOW_INT_MASK               BIT(3)
-+#define HWFWD_DSCP_EMPTY_INT_MASK     BIT(2)
-+#define IRQ0_FULL_INT_MASK            BIT(1)
-+#define IRQ0_INT_MASK                 BIT(0)
-+
-+#define TX_DONE_INT_MASK(_n)                                  \
-+      ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
-+            : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_TX_MASK                                           \
-+      (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
-+       IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_IDX0_MASK                                         \
-+      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
-+       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
-+       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
-+       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
-+       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
-+       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
-+       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
-+       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
-+       RX15_COHERENT_INT_MASK | INT_TX_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE2 */
-+#define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
-+#define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
-+#define RX13_NO_CPU_DSCP_INT_MASK     BIT(29)
-+#define RX12_NO_CPU_DSCP_INT_MASK     BIT(28)
-+#define RX11_NO_CPU_DSCP_INT_MASK     BIT(27)
-+#define RX10_NO_CPU_DSCP_INT_MASK     BIT(26)
-+#define RX9_NO_CPU_DSCP_INT_MASK      BIT(25)
-+#define RX8_NO_CPU_DSCP_INT_MASK      BIT(24)
-+#define RX7_NO_CPU_DSCP_INT_MASK      BIT(23)
-+#define RX6_NO_CPU_DSCP_INT_MASK      BIT(22)
-+#define RX5_NO_CPU_DSCP_INT_MASK      BIT(21)
-+#define RX4_NO_CPU_DSCP_INT_MASK      BIT(20)
-+#define RX3_NO_CPU_DSCP_INT_MASK      BIT(19)
-+#define RX2_NO_CPU_DSCP_INT_MASK      BIT(18)
-+#define RX1_NO_CPU_DSCP_INT_MASK      BIT(17)
-+#define RX0_NO_CPU_DSCP_INT_MASK      BIT(16)
-+#define RX15_DONE_INT_MASK            BIT(15)
-+#define RX14_DONE_INT_MASK            BIT(14)
-+#define RX13_DONE_INT_MASK            BIT(13)
-+#define RX12_DONE_INT_MASK            BIT(12)
-+#define RX11_DONE_INT_MASK            BIT(11)
-+#define RX10_DONE_INT_MASK            BIT(10)
-+#define RX9_DONE_INT_MASK             BIT(9)
-+#define RX8_DONE_INT_MASK             BIT(8)
-+#define RX7_DONE_INT_MASK             BIT(7)
-+#define RX6_DONE_INT_MASK             BIT(6)
-+#define RX5_DONE_INT_MASK             BIT(5)
-+#define RX4_DONE_INT_MASK             BIT(4)
-+#define RX3_DONE_INT_MASK             BIT(3)
-+#define RX2_DONE_INT_MASK             BIT(2)
-+#define RX1_DONE_INT_MASK             BIT(1)
-+#define RX0_DONE_INT_MASK             BIT(0)
-+
-+#define RX_DONE_INT_MASK                                      \
-+      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
-+       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
-+       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
-+       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
-+       RX15_DONE_INT_MASK)
-+#define INT_IDX1_MASK                                         \
-+      (RX_DONE_INT_MASK |                                     \
-+       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
-+       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
-+       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
-+       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
-+       RX15_NO_CPU_DSCP_INT_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE5 */
-+#define TX31_COHERENT_INT_MASK                BIT(31)
-+#define TX30_COHERENT_INT_MASK                BIT(30)
-+#define TX29_COHERENT_INT_MASK                BIT(29)
-+#define TX28_COHERENT_INT_MASK                BIT(28)
-+#define TX27_COHERENT_INT_MASK                BIT(27)
-+#define TX26_COHERENT_INT_MASK                BIT(26)
-+#define TX25_COHERENT_INT_MASK                BIT(25)
-+#define TX24_COHERENT_INT_MASK                BIT(24)
-+#define TX23_COHERENT_INT_MASK                BIT(23)
-+#define TX22_COHERENT_INT_MASK                BIT(22)
-+#define TX21_COHERENT_INT_MASK                BIT(21)
-+#define TX20_COHERENT_INT_MASK                BIT(20)
-+#define TX19_COHERENT_INT_MASK                BIT(19)
-+#define TX18_COHERENT_INT_MASK                BIT(18)
-+#define TX17_COHERENT_INT_MASK                BIT(17)
-+#define TX16_COHERENT_INT_MASK                BIT(16)
-+#define TX15_COHERENT_INT_MASK                BIT(15)
-+#define TX14_COHERENT_INT_MASK                BIT(14)
-+#define TX13_COHERENT_INT_MASK                BIT(13)
-+#define TX12_COHERENT_INT_MASK                BIT(12)
-+#define TX11_COHERENT_INT_MASK                BIT(11)
-+#define TX10_COHERENT_INT_MASK                BIT(10)
-+#define TX9_COHERENT_INT_MASK         BIT(9)
-+#define TX8_COHERENT_INT_MASK         BIT(8)
-+
-+#define INT_IDX4_MASK                                         \
-+      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
-+       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
-+       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
-+       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
-+       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
-+       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
-+       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
-+       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
-+       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
-+       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
-+       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
-+       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-+
-+#define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
-+
-+#define REG_TX_IRQ_CFG(_n)            ((_n) ? 0x004c : 0x0054)
-+#define TX_IRQ_THR_MASK                       GENMASK(27, 16)
-+#define TX_IRQ_DEPTH_MASK             GENMASK(11, 0)
-+
-+#define REG_IRQ_CLEAR_LEN(_n)         ((_n) ? 0x0064 : 0x0058)
-+#define IRQ_CLEAR_LEN_MASK            GENMASK(7, 0)
-+
-+#define REG_IRQ_STATUS(_n)            ((_n) ? 0x0068 : 0x005c)
-+#define IRQ_ENTRY_LEN_MASK            GENMASK(27, 16)
-+#define IRQ_HEAD_IDX_MASK             GENMASK(11, 0)
-+
-+#define REG_TX_RING_BASE(_n)  \
-+      (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-+
-+#define REG_TX_RING_BLOCKING(_n)      \
-+      (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-+
-+#define TX_RING_IRQ_BLOCKING_MAP_MASK                 BIT(6)
-+#define TX_RING_IRQ_BLOCKING_CFG_MASK                 BIT(4)
-+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK          BIT(2)
-+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK    BIT(1)
-+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK    BIT(0)
-+
-+#define REG_TX_CPU_IDX(_n)    \
-+      (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-+
-+#define TX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_TX_DMA_IDX(_n)    \
-+      (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-+
-+#define TX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define IRQ_RING_IDX_MASK             GENMASK(20, 16)
-+#define IRQ_DESC_IDX_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_RING_BASE(_n)  \
-+      (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-+
-+#define REG_RX_RING_SIZE(_n)  \
-+      (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-+
-+#define RX_RING_THR_MASK              GENMASK(31, 16)
-+#define RX_RING_SIZE_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_CPU_IDX(_n)    \
-+      (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-+
-+#define RX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_RX_DMA_IDX(_n)    \
-+      (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-+
-+#define REG_RX_DELAY_INT_IDX(_n)      \
-+      (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-+
-+#define RX_DELAY_INT_MASK             GENMASK(15, 0)
-+
-+#define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_INGRESS_TRTCM_CFG         0x0070
-+#define INGRESS_TRTCM_EN_MASK         BIT(31)
-+#define INGRESS_TRTCM_MODE_MASK               BIT(30)
-+#define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
-+#define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
-+
-+#define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
-+#define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
-+
-+#define REG_LMGR_INIT_CFG             0x1000
-+#define LMGR_INIT_START                       BIT(31)
-+#define LMGR_SRAM_MODE_MASK           BIT(30)
-+#define HW_FWD_PKTSIZE_OVERHEAD_MASK  GENMASK(27, 20)
-+#define HW_FWD_DESC_NUM_MASK          GENMASK(16, 0)
-+
-+#define REG_FWD_DSCP_LOW_THR          0x1004
-+#define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
-+
-+#define REG_EGRESS_RATE_METER_CFG             0x100c
-+#define EGRESS_RATE_METER_EN_MASK             BIT(29)
-+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
-+#define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
-+#define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
-+
-+#define REG_EGRESS_TRTCM_CFG          0x1010
-+#define EGRESS_TRTCM_EN_MASK          BIT(31)
-+#define EGRESS_TRTCM_MODE_MASK                BIT(30)
-+#define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
-+#define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
-+
-+#define REG_TXWRR_MODE_CFG            0x1020
-+#define TWRR_WEIGHT_SCALE_MASK                BIT(31)
-+#define TWRR_WEIGHT_BASE_MASK         BIT(3)
-+
-+#define REG_PSE_BUF_USAGE_CFG         0x1028
-+#define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
-+
-+#define REG_GLB_TRTCM_CFG             0x1080
-+#define GLB_TRTCM_EN_MASK             BIT(31)
-+#define GLB_TRTCM_MODE_MASK           BIT(30)
-+#define GLB_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define GLB_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+#define REG_TXQ_CNGST_CFG             0x10a0
-+#define TXQ_CNGST_DROP_EN             BIT(31)
-+#define TXQ_CNGST_DEI_DROP_EN         BIT(30)
-+
-+#define REG_SLA_TRTCM_CFG             0x1150
-+#define SLA_TRTCM_EN_MASK             BIT(31)
-+#define SLA_TRTCM_MODE_MASK           BIT(30)
-+#define SLA_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define SLA_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+/* CTRL */
-+#define QDMA_DESC_DONE_MASK           BIT(31)
-+#define QDMA_DESC_DROP_MASK           BIT(30) /* tx: drop - rx: overflow */
-+#define QDMA_DESC_MORE_MASK           BIT(29) /* more SG elements */
-+#define QDMA_DESC_DEI_MASK            BIT(25)
-+#define QDMA_DESC_NO_DROP_MASK                BIT(24)
-+#define QDMA_DESC_LEN_MASK            GENMASK(15, 0)
-+/* DATA */
-+#define QDMA_DESC_NEXT_ID_MASK                GENMASK(15, 0)
-+/* TX MSG0 */
-+#define QDMA_ETH_TXMSG_MIC_IDX_MASK   BIT(30)
-+#define QDMA_ETH_TXMSG_SP_TAG_MASK    GENMASK(29, 14)
-+#define QDMA_ETH_TXMSG_ICO_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_UCO_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_TCO_MASK               BIT(11)
-+#define QDMA_ETH_TXMSG_TSO_MASK               BIT(10)
-+#define QDMA_ETH_TXMSG_FAST_MASK      BIT(9)
-+#define QDMA_ETH_TXMSG_OAM_MASK               BIT(8)
-+#define QDMA_ETH_TXMSG_CHAN_MASK      GENMASK(7, 3)
-+#define QDMA_ETH_TXMSG_QUEUE_MASK     GENMASK(2, 0)
-+/* TX MSG1 */
-+#define QDMA_ETH_TXMSG_NO_DROP                BIT(31)
-+#define QDMA_ETH_TXMSG_METER_MASK     GENMASK(30, 24) /* 0x7f no meters */
-+#define QDMA_ETH_TXMSG_FPORT_MASK     GENMASK(23, 20)
-+#define QDMA_ETH_TXMSG_NBOQ_MASK      GENMASK(19, 15)
-+#define QDMA_ETH_TXMSG_HWF_MASK               BIT(14)
-+#define QDMA_ETH_TXMSG_HOP_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_PTP_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
-+#define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
-+
-+/* RX MSG1 */
-+#define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
-+#define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
-+#define QDMA_ETH_RXMSG_IP4_MASK               BIT(29)
-+#define QDMA_ETH_RXMSG_IP4F_MASK      BIT(28)
-+#define QDMA_ETH_RXMSG_L4_VALID_MASK  BIT(27)
-+#define QDMA_ETH_RXMSG_L4F_MASK               BIT(26)
-+#define QDMA_ETH_RXMSG_SPORT_MASK     GENMASK(25, 21)
-+#define QDMA_ETH_RXMSG_CRSN_MASK      GENMASK(20, 16)
-+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-+
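-+/* Hardware Tx/Rx descriptor: eight little-endian 32-bit words. ctrl carries
-+ * the DONE/DROP flags and the buffer length, data holds the next descriptor
-+ * id, and msg0-msg3 carry the per-direction message fields decoded with the
-+ * QDMA_ETH_TXMSG_* and QDMA_ETH_RXMSG_* masks above.
-+ */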
-+struct airoha_qdma_desc {
-+      __le32 rsv;
-+      __le32 ctrl;
-+      __le32 addr;
-+      __le32 data;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 msg2;
-+      __le32 msg3;
-+};
-+
-+/* CTRL0 */
-+#define QDMA_FWD_DESC_CTX_MASK                BIT(31)
-+#define QDMA_FWD_DESC_RING_MASK               GENMASK(30, 28)
-+#define QDMA_FWD_DESC_IDX_MASK                GENMASK(27, 16)
-+#define QDMA_FWD_DESC_LEN_MASK                GENMASK(15, 0)
-+/* CTRL1 */
-+#define QDMA_FWD_DESC_FIRST_IDX_MASK  GENMASK(15, 0)
-+/* CTRL2 */
-+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK       GENMASK(2, 0)
-+
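-+/* Descriptor layout for the QDMA hardware-forwarding path (cf. the hfwd
-+ * buffers in struct airoha_eth); the ctrl0-ctrl2 fields are described by
-+ * the QDMA_FWD_DESC_* masks above.
-+ */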
-+struct airoha_qdma_fwd_desc {
-+      __le32 addr;
-+      __le32 ctrl0;
-+      __le32 ctrl1;
-+      __le32 ctrl2;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 rsv0;
-+      __le32 rsv1;
-+};
-+
-+enum {
-+      QDMA_INT_REG_IDX0,
-+      QDMA_INT_REG_IDX1,
-+      QDMA_INT_REG_IDX2,
-+      QDMA_INT_REG_IDX3,
-+      QDMA_INT_REG_IDX4,
-+      QDMA_INT_REG_MAX
-+};
-+
-+enum {
-+      XSI_PCIE0_PORT,
-+      XSI_PCIE1_PORT,
-+      XSI_USB_PORT,
-+      XSI_AE_PORT,
-+      XSI_ETH_PORT,
-+};
-+
-+enum {
-+      XSI_PCIE0_VIP_PORT_MASK = BIT(22),
-+      XSI_PCIE1_VIP_PORT_MASK = BIT(23),
-+      XSI_USB_VIP_PORT_MASK   = BIT(25),
-+      XSI_ETH_VIP_PORT_MASK   = BIT(24),
-+};
-+
-+enum {
-+      DEV_STATE_INITIALIZED,
-+};
-+
-+enum {
-+      CDM_CRSN_QSEL_Q1 = 1,
-+      CDM_CRSN_QSEL_Q5 = 5,
-+      CDM_CRSN_QSEL_Q6 = 6,
-+      CDM_CRSN_QSEL_Q15 = 15,
-+};
-+
-+enum {
-+      CRSN_08 = 0x8,
-+      CRSN_21 = 0x15, /* KA */
-+      CRSN_22 = 0x16, /* hit bind and force route to CPU */
-+      CRSN_24 = 0x18,
-+      CRSN_25 = 0x19,
-+};
-+
-+enum {
-+      FE_PSE_PORT_CDM1,
-+      FE_PSE_PORT_GDM1,
-+      FE_PSE_PORT_GDM2,
-+      FE_PSE_PORT_GDM3,
-+      FE_PSE_PORT_PPE1,
-+      FE_PSE_PORT_CDM2,
-+      FE_PSE_PORT_CDM3,
-+      FE_PSE_PORT_CDM4,
-+      FE_PSE_PORT_PPE2,
-+      FE_PSE_PORT_GDM4,
-+      FE_PSE_PORT_CDM5,
-+      FE_PSE_PORT_DROP = 0xf,
-+};
-+
-+struct airoha_queue_entry {
-+      union {
-+              void *buf;
-+              struct sk_buff *skb;
-+      };
-+      dma_addr_t dma_addr;
-+      u16 dma_len;
-+};
-+
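-+/* Generic descriptor ring: head is the producer index, tail the consumer
-+ * index and queued the number of in-flight descriptors; entry[] mirrors
-+ * desc[] with the CPU-side buffer/skb pointer and its DMA mapping.
-+ */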
-+struct airoha_queue {
-+      struct airoha_eth *eth;
-+
-+      /* protect concurrent queue accesses */
-+      spinlock_t lock;
-+      struct airoha_queue_entry *entry;
-+      struct airoha_qdma_desc *desc;
-+      u16 head;
-+      u16 tail;
-+
-+      int queued;
-+      int ndesc;
-+      int free_thr;
-+      int buf_size;
-+
-+      struct napi_struct napi;
-+      struct page_pool *page_pool;
-+};
-+
-+struct airoha_tx_irq_queue {
-+      struct airoha_eth *eth;
-+
-+      struct napi_struct napi;
-+      u32 *q;
-+
-+      int size;
-+      int queued;
-+      u16 head;
-+};
-+
-+struct airoha_hw_stats {
-+      /* protect concurrent hw_stats accesses */
-+      spinlock_t lock;
-+      struct u64_stats_sync syncp;
-+
-+      /* get_stats64 */
-+      u64 rx_ok_pkts;
-+      u64 tx_ok_pkts;
-+      u64 rx_ok_bytes;
-+      u64 tx_ok_bytes;
-+      u64 rx_multicast;
-+      u64 rx_errors;
-+      u64 rx_drops;
-+      u64 tx_drops;
-+      u64 rx_crc_error;
-+      u64 rx_over_errors;
-+      /* ethtool stats */
-+      u64 tx_broadcast;
-+      u64 tx_multicast;
-+      u64 tx_len[7];
-+      u64 rx_broadcast;
-+      u64 rx_fragment;
-+      u64 rx_jabber;
-+      u64 rx_len[7];
-+};
-+
-+struct airoha_gdm_port {
-+      struct net_device *dev;
-+      struct airoha_eth *eth;
-+      int id;
-+
-+      struct airoha_hw_stats stats;
-+};
-+
-+struct airoha_eth {
-+      struct device *dev;
-+
-+      unsigned long state;
-+
-+      void __iomem *qdma_regs;
-+      void __iomem *fe_regs;
-+
-+      /* protect concurrent irqmask accesses */
-+      spinlock_t irq_lock;
-+      u32 irqmask[QDMA_INT_REG_MAX];
-+      int irq;
-+
-+      struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-+      struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-+
-+      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-+
-+      struct net_device *napi_dev;
-+      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-+      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-+
-+      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-+
-+      /* descriptor and packet buffers for qdma hw forward */
-+      struct {
-+              void *desc;
-+              void *q;
-+      } hfwd;
-+};
-+
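-+/* MMIO accessors: airoha_rmw() clears the bits in mask, ORs in val and
-+ * writes the result back. The airoha_fe_* and airoha_qdma_* wrappers below
-+ * select the frame engine or QDMA register window respectively.
-+ */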
-+static u32 airoha_rr(void __iomem *base, u32 offset)
-+{
-+      return readl(base + offset);
-+}
-+
-+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
-+{
-+      writel(val, base + offset);
-+}
-+
-+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
-+{
-+      val |= (airoha_rr(base, offset) & ~mask);
-+      airoha_wr(base, offset, val);
-+
-+      return val;
-+}
-+
-+#define airoha_fe_rr(eth, offset)                             \
-+      airoha_rr((eth)->fe_regs, (offset))
-+#define airoha_fe_wr(eth, offset, val)                                \
-+      airoha_wr((eth)->fe_regs, (offset), (val))
-+#define airoha_fe_rmw(eth, offset, mask, val)                 \
-+      airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-+#define airoha_fe_set(eth, offset, val)                               \
-+      airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-+#define airoha_fe_clear(eth, offset, val)                     \
-+      airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-+
-+#define airoha_qdma_rr(eth, offset)                           \
-+      airoha_rr((eth)->qdma_regs, (offset))
-+#define airoha_qdma_wr(eth, offset, val)                      \
-+      airoha_wr((eth)->qdma_regs, (offset), (val))
-+#define airoha_qdma_rmw(eth, offset, mask, val)                       \
-+      airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-+#define airoha_qdma_set(eth, offset, val)                     \
-+      airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-+#define airoha_qdma_clear(eth, offset, val)                   \
-+      airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
-+
-+static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
-+                                  u32 clear, u32 set)
-+{
-+      unsigned long flags;
-+
-+      if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
-+              return;
-+
-+      spin_lock_irqsave(&eth->irq_lock, flags);
-+
-+      eth->irqmask[index] &= ~clear;
-+      eth->irqmask[index] |= set;
-+      airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
-+      /* Read irq_enable register in order to guarantee the update above
-+       * completes in the spinlock critical section.
-+       */
-+      airoha_qdma_rr(eth, REG_INT_ENABLE(index));
-+
-+      spin_unlock_irqrestore(&eth->irq_lock, flags);
-+}
-+
-+static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
-+                                 u32 mask)
-+{
-+      airoha_qdma_set_irqmask(eth, index, 0, mask);
-+}
-+
-+static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
-+                                  u32 mask)
-+{
-+      airoha_qdma_set_irqmask(eth, index, mask, 0);
-+}
-+
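-+/* Program the LAN MAC address: the upper three bytes go to REG_FE_LAN_MAC_H,
-+ * the lower three bytes are written to both LMIN and LMAX.
-+ */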
-+static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
-+{
-+      u32 val;
-+
-+      val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
-+      airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
-+
-+      val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
-+      airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
-+      airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
-+}
-+
-+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
-+                                      u32 val)
-+{
-+      airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
-+                    FIELD_PREP(GDM_OCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
-+                    FIELD_PREP(GDM_MCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
-+                    FIELD_PREP(GDM_BCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
-+                    FIELD_PREP(GDM_UCFQ_MASK, val));
-+}
-+
-+static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
-+{
-+      u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
-+      u32 vip_port, cfg_addr;
-+
-+      switch (port) {
-+      case XSI_PCIE0_PORT:
-+              vip_port = XSI_PCIE0_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(3);
-+              break;
-+      case XSI_PCIE1_PORT:
-+              vip_port = XSI_PCIE1_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(3);
-+              break;
-+      case XSI_USB_PORT:
-+              vip_port = XSI_USB_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(4);
-+              break;
-+      case XSI_ETH_PORT:
-+              vip_port = XSI_ETH_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(4);
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      if (enable) {
-+              airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
-+              airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
-+      } else {
-+              airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
-+              airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
-+      }
-+
-+      airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-+
-+      return 0;
-+}
-+
-+static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-+{
-+      const int port_list[] = {
-+              XSI_PCIE0_PORT,
-+              XSI_PCIE1_PORT,
-+              XSI_USB_PORT,
-+              XSI_ETH_PORT
-+      };
-+      int i, err;
-+
-+      for (i = 0; i < ARRAY_SIZE(port_list); i++) {
-+              err = airoha_set_gdm_port(eth, port_list[i], enable);
-+              if (err)
-+                      goto error;
-+      }
-+
-+      return 0;
-+
-+error:
-+      for (i--; i >= 0; i--)
-+              airoha_set_gdm_port(eth, port_list[i], false);
-+
-+      return err;
-+}
-+
-+static void airoha_fe_maccr_init(struct airoha_eth *eth)
-+{
-+      int p;
-+
-+      for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-+              airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
-+                            GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
-+                            GDM_DROP_CRC_ERR);
-+              airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
-+                                          FE_PSE_PORT_CDM1);
-+              airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
-+                            GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-+                            FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-+                            FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
-+      }
-+
-+      airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
-+                    FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
-+
-+      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
-+}
-+
-+static void airoha_fe_vip_setup(struct airoha_eth *eth)
-+{
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(4),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(6),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(7),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* BOOTP (0x43) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(8),
-+                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      /* BOOTP (0x44) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(9),
-+                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      /* ISAKMP */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(10),
-+                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(11),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* DHCPv6 */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(12),
-+                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(19),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* ETH->ETH_P_1905 (0x893a) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(20),
-+                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(21),
-+                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+}
-+
-+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
-+                                           u32 port, u32 queue)
-+{
-+      u32 val;
-+
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-+                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
-+                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-+                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
-+      val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
-+
-+      return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
-+}
-+
-+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
-+                                            u32 port, u32 queue, u32 val)
-+{
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
-+                    FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-+                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
-+                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
-+                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-+                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
-+                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
-+}
-+
-+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
-+                                  u32 port, u32 queue, u32 val)
-+{
-+      u32 orig_val, tmp, all_rsv, fq_limit;
-+
-+      airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
-+
-+      /* modify all rsv */
-+      orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
-+      tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
-+      all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
-+      all_rsv += (val - orig_val);
-+      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
-+                    FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
-+
-+      /* modify hthd */
-+      tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
-+      fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
-+      tmp = fq_limit - all_rsv - 0x20;
-+      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-+                    PSE_SHARE_USED_HTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
-+
-+      tmp = fq_limit - all_rsv - 0x100;
-+      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-+                    PSE_SHARE_USED_MTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
-+      tmp = (3 * tmp) >> 2;
-+      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
-+                    PSE_SHARE_USED_LTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
-+
-+      return 0;
-+}
-+
-+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
-+{
-+      const u32 pse_port_num_queues[] = {
-+              [FE_PSE_PORT_CDM1] = 6,
-+              [FE_PSE_PORT_GDM1] = 6,
-+              [FE_PSE_PORT_GDM2] = 32,
-+              [FE_PSE_PORT_GDM3] = 6,
-+              [FE_PSE_PORT_PPE1] = 4,
-+              [FE_PSE_PORT_CDM2] = 6,
-+              [FE_PSE_PORT_CDM3] = 8,
-+              [FE_PSE_PORT_CDM4] = 10,
-+              [FE_PSE_PORT_PPE2] = 4,
-+              [FE_PSE_PORT_GDM4] = 2,
-+              [FE_PSE_PORT_CDM5] = 2,
-+      };
-+      int q;
-+
-+      /* hw misses PPE2 oq rsv */
-+      airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
-+                    PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
-+
-+      /* CDM1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* GDM1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* GDM2 */
-+      for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
-+      /* GDM3 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* PPE1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
-+              if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
-+                                               PSE_QUEUE_RSV_PAGES);
-+              else
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
-+      }
-+      /* CDM2 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* CDM3 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
-+      /* CDM4 */
-+      for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* PPE2 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
-+              if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
-+                                               PSE_QUEUE_RSV_PAGES);
-+              else
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
-+      }
-+      /* GDM4 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* CDM5 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+}
-+
-+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
-+              int err, j;
-+              u32 val;
-+
-+              airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-+
-+              val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-+                    MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
-+              airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-+              err = read_poll_timeout(airoha_fe_rr, val,
-+                                      val & MC_VLAN_CFG_CMD_DONE_MASK,
-+                                      USEC_PER_MSEC, 5 * USEC_PER_MSEC,
-+                                      false, eth, REG_MC_VLAN_CFG);
-+              if (err)
-+                      return err;
-+
-+              for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
-+                      airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-+
-+                      val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-+                            FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
-+                            MC_VLAN_CFG_RW_MASK;
-+                      airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-+                      err = read_poll_timeout(airoha_fe_rr, val,
-+                                              val & MC_VLAN_CFG_CMD_DONE_MASK,
-+                                              USEC_PER_MSEC,
-+                                              5 * USEC_PER_MSEC, false, eth,
-+                                              REG_MC_VLAN_CFG);
-+                      if (err)
-+                              return err;
-+              }
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
-+{
-+      /* CDM1_CRSN_QSEL */
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                               CDM_CRSN_QSEL_Q6));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                               CDM_CRSN_QSEL_Q1));
-+      /* CDM2_CRSN_QSEL */
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                               CDM_CRSN_QSEL_Q6));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                               CDM_CRSN_QSEL_Q1));
-+}
-+
-+static int airoha_fe_init(struct airoha_eth *eth)
-+{
-+      airoha_fe_maccr_init(eth);
-+
-+      /* PSE IQ reserve */
-+      airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
-+                    FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
-+      airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
-+                    PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
-+                    FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
-+                    FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
-+
-+      /* enable FE copy engine for MC/KA/DPI */
-+      airoha_fe_wr(eth, REG_FE_PCE_CFG,
-+                   PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
-+      /* set vip queue selection to ring 1 */
-+      airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
-+                    FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
-+      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
-+                    FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
-+      /* set GDM4 source interface offset to 8 */
-+      airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
-+                    GDM4_SPORT_OFF2_MASK |
-+                    GDM4_SPORT_OFF1_MASK |
-+                    GDM4_SPORT_OFF0_MASK,
-+                    FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
-+                    FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
-+                    FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
-+
-+      /* set PSE Page as 128B */
-+      airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
-+                    FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
-+                    FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
-+                    FE_DMA_GLO_PG_SZ_MASK);
-+      airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
-+                   FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
-+                   FE_RST_GDM4_MBI_ARB_MASK);
-+      usleep_range(1000, 2000);
-+
-+      /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
-+       * connect other rings to PSE Port0 OQ-0
-+       */
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
-+
-+      airoha_fe_vip_setup(eth);
-+      airoha_fe_pse_ports_init(eth);
-+
-+      airoha_fe_set(eth, REG_GDM_MISC_CFG,
-+                    GDM2_RDM_ACK_WAIT_PREF_MASK |
-+                    GDM2_CHN_VLD_MODE_MASK);
-+      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
-+
-+      /* init fragment and assemble Force Port */
-+      /* NPU Core-3, NPU Bridge Channel-3 */
-+      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-+                    IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
-+                    FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
-+                    FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
-+      /* QDMA LAN, RX Ring-22 */
-+      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-+                    IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
-+                    FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
-+                    FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
-+
-+      airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
-+      airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
-+
-+      airoha_fe_crsn_qsel_init(eth);
-+
-+      airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
-+      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
-+
-+      /* default aging mode for mbi unlock issue */
-+      airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
-+                    MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
-+                    FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
-+                    FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
-+
-+      /* disable IFC by default */
-+      airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
-+
-+      /* enable 1:N vlan action, init vlan table */
-+      airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
-+
-+      return airoha_fe_mc_vlan_clear(eth);
-+}
-+
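-+/* Refill an RX ring with page-pool fragments: program each free descriptor
-+ * with the fragment DMA address and length, advance the ring head and
-+ * publish it through REG_RX_CPU_IDX. Returns the number of refilled slots.
-+ */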
-+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
-+{
-+      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_eth *eth = q->eth;
-+      int qid = q - &eth->q_rx[0];
-+      int nframes = 0;
-+
-+      while (q->queued < q->ndesc - 1) {
-+              struct airoha_queue_entry *e = &q->entry[q->head];
-+              struct airoha_qdma_desc *desc = &q->desc[q->head];
-+              struct page *page;
-+              int offset;
-+              u32 val;
-+
-+              page = page_pool_dev_alloc_frag(q->page_pool, &offset,
-+                                              q->buf_size);
-+              if (!page)
-+                      break;
-+
-+              q->head = (q->head + 1) % q->ndesc;
-+              q->queued++;
-+              nframes++;
-+
-+              e->buf = page_address(page) + offset;
-+              e->dma_addr = page_pool_get_dma_addr(page) + offset;
-+              e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
-+
-+              dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
-+                                         dir);
-+
-+              val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
-+              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-+              WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
-+              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
-+              WRITE_ONCE(desc->data, cpu_to_le32(val));
-+              WRITE_ONCE(desc->msg0, 0);
-+              WRITE_ONCE(desc->msg1, 0);
-+              WRITE_ONCE(desc->msg2, 0);
-+              WRITE_ONCE(desc->msg3, 0);
-+
-+              airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-+                              FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-+      }
-+
-+      return nframes;
-+}
-+
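-+/* Derive the GDM port index from the RX MSG1 source-port field:
-+ * sport 0x10-0x13 maps to port 0, sport 2-4 maps to sport - 1,
-+ * anything else is rejected with -EINVAL.
-+ */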
-+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
-+                                  struct airoha_qdma_desc *desc)
-+{
-+      u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
-+
-+      sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
-+      switch (sport) {
-+      case 0x10 ... 0x13:
-+              port = 0;
-+              break;
-+      case 0x2 ... 0x4:
-+              port = sport - 1;
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
-+}
-+
-+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
-+{
-+      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_eth *eth = q->eth;
-+      int qid = q - &eth->q_rx[0];
-+      int done = 0;
-+
-+      while (done < budget) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+              struct airoha_qdma_desc *desc = &q->desc[q->tail];
-+              dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-+              u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-+              struct sk_buff *skb;
-+              int len, p;
-+
-+              if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
-+                      break;
-+
-+              if (!dma_addr)
-+                      break;
-+
-+              len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-+              if (!len)
-+                      break;
-+
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+
-+              dma_sync_single_for_cpu(eth->dev, dma_addr,
-+                                      SKB_WITH_OVERHEAD(q->buf_size), dir);
-+
-+              p = airoha_qdma_get_gdm_port(eth, desc);
-+              if (p < 0 || !eth->ports[p]) {
-+                      page_pool_put_full_page(q->page_pool,
-+                                              virt_to_head_page(e->buf),
-+                                              true);
-+                      continue;
-+              }
-+
-+              skb = napi_build_skb(e->buf, q->buf_size);
-+              if (!skb) {
-+                      page_pool_put_full_page(q->page_pool,
-+                                              virt_to_head_page(e->buf),
-+                                              true);
-+                      break;
-+              }
-+
-+              skb_reserve(skb, 2);
-+              __skb_put(skb, len);
-+              skb_mark_for_recycle(skb);
-+              skb->dev = eth->ports[p]->dev;
-+              skb->protocol = eth_type_trans(skb, skb->dev);
-+              skb->ip_summed = CHECKSUM_UNNECESSARY;
-+              skb_record_rx_queue(skb, qid);
-+              napi_gro_receive(&q->napi, skb);
-+
-+              done++;
-+      }
-+      airoha_qdma_fill_rx_queue(q);
-+
-+      return done;
-+}
-+
-+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+      struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-+      struct airoha_eth *eth = q->eth;
-+      int cur, done = 0;
-+
-+      do {
-+              cur = airoha_qdma_rx_process(q, budget - done);
-+              done += cur;
-+      } while (cur && done < budget);
-+
-+      if (done < budget && napi_complete(napi))
-+              airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
-+                                     RX_DONE_INT_MASK);
-+
-+      return done;
-+}
-+
-+static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
-+                                   struct airoha_queue *q, int ndesc)
-+{
-+      const struct page_pool_params pp_params = {
-+              .order = 0,
-+              .pool_size = 256,
-+              .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
-+                       PP_FLAG_PAGE_FRAG,
-+              .dma_dir = DMA_FROM_DEVICE,
-+              .max_len = PAGE_SIZE,
-+              .nid = NUMA_NO_NODE,
-+              .dev = eth->dev,
-+              .napi = &q->napi,
-+      };
-+      int qid = q - &eth->q_rx[0], thr;
-+      dma_addr_t dma_addr;
-+
-+      q->buf_size = PAGE_SIZE / 2;
-+      q->ndesc = ndesc;
-+      q->eth = eth;
-+
-+      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-+                              GFP_KERNEL);
-+      if (!q->entry)
-+              return -ENOMEM;
-+
-+      q->page_pool = page_pool_create(&pp_params);
-+      if (IS_ERR(q->page_pool)) {
-+              int err = PTR_ERR(q->page_pool);
-+
-+              q->page_pool = NULL;
-+              return err;
-+      }
-+
-+      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-+                                    &dma_addr, GFP_KERNEL);
-+      if (!q->desc)
-+              return -ENOMEM;
-+
-+      netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
-+
-+      airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
-+                      FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
-+
-+      thr = clamp(ndesc >> 3, 1, 32);
-+      airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
-+                      FIELD_PREP(RX_RING_THR_MASK, thr));
-+      airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-+                      FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
-+
-+      airoha_qdma_fill_rx_queue(q);
-+
-+      return 0;
-+}
-+
-+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
-+{
-+      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_eth *eth = q->eth;
-+
-+      while (q->queued) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+              struct page *page = virt_to_head_page(e->buf);
-+
-+              dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
-+                                      dir);
-+              page_pool_put_full_page(q->page_pool, page, false);
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+      }
-+}
-+
-+static int airoha_qdma_init_rx(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+              int err;
-+
-+              if (!(RX_DONE_INT_MASK & BIT(i))) {
-+                      /* rx-queue not bound to an irq */
-+                      continue;
-+              }
-+
-+              err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
-+                                              RX_DSCP_NUM(i));
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
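-+/* TX completion NAPI handler: each IRQ queue slot packs the TX ring index
-+ * and the last completed descriptor index (IRQ_RING_IDX, IRQ_DESC_IDX),
-+ * with 0xff marking an empty slot. Completed entries are unmapped, their
-+ * skbs freed (waking a stopped netdev queue once enough room is reclaimed),
-+ * and the consumed IRQ slots are acknowledged via REG_IRQ_CLEAR_LEN in
-+ * chunks of at most 0x80.
-+ */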
-+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+      struct airoha_tx_irq_queue *irq_q;
-+      struct airoha_eth *eth;
-+      int id, done = 0;
-+
-+      irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-+      eth = irq_q->eth;
-+      id = irq_q - &eth->q_tx_irq[0];
-+
-+      while (irq_q->queued > 0 && done < budget) {
-+              u32 qid, last, val = irq_q->q[irq_q->head];
-+              struct airoha_queue *q;
-+
-+              if (val == 0xff)
-+                      break;
-+
-+              irq_q->q[irq_q->head] = 0xff; /* mark as done */
-+              irq_q->head = (irq_q->head + 1) % irq_q->size;
-+              irq_q->queued--;
-+              done++;
-+
-+              last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-+              qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
-+
-+              if (qid >= ARRAY_SIZE(eth->q_tx))
-+                      continue;
-+
-+              q = &eth->q_tx[qid];
-+              if (!q->ndesc)
-+                      continue;
-+
-+              spin_lock_bh(&q->lock);
-+
-+              while (q->queued > 0) {
-+                      struct airoha_qdma_desc *desc = &q->desc[q->tail];
-+                      struct airoha_queue_entry *e = &q->entry[q->tail];
-+                      u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-+                      struct sk_buff *skb = e->skb;
-+                      u16 index = q->tail;
-+
-+                      if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
-+                          !(desc_ctrl & QDMA_DESC_DROP_MASK))
-+                              break;
-+
-+                      q->tail = (q->tail + 1) % q->ndesc;
-+                      q->queued--;
-+
-+                      dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-+                                       DMA_TO_DEVICE);
-+
-+                      WRITE_ONCE(desc->msg0, 0);
-+                      WRITE_ONCE(desc->msg1, 0);
-+
-+                      if (skb) {
-+                              struct netdev_queue *txq;
-+
-+                              txq = netdev_get_tx_queue(skb->dev, qid);
-+                              if (netif_tx_queue_stopped(txq) &&
-+                                  q->ndesc - q->queued >= q->free_thr)
-+                                      netif_tx_wake_queue(txq);
-+
-+                              dev_kfree_skb_any(skb);
-+                              e->skb = NULL;
-+                      }
-+
-+                      if (index == last)
-+                              break;
-+              }
-+
-+              spin_unlock_bh(&q->lock);
-+      }
-+
-+      if (done) {
-+              int i, len = done >> 7;
-+
-+              for (i = 0; i < len; i++)
-+                      airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
-+                                      IRQ_CLEAR_LEN_MASK, 0x80);
-+              airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
-+                              IRQ_CLEAR_LEN_MASK, (done & 0x7f));
-+      }
-+
-+      if (done < budget && napi_complete(napi))
-+              airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
-+                                     TX_DONE_INT_MASK(id));
-+
-+      return done;
-+}
-+
-+static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
-+                                   struct airoha_queue *q, int size)
-+{
-+      int i, qid = q - &eth->q_tx[0];
-+      dma_addr_t dma_addr;
-+
-+      spin_lock_init(&q->lock);
-+      q->ndesc = size;
-+      q->eth = eth;
-+      q->free_thr = 1 + MAX_SKB_FRAGS;
-+
-+      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-+                              GFP_KERNEL);
-+      if (!q->entry)
-+              return -ENOMEM;
-+
-+      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-+                                    &dma_addr, GFP_KERNEL);
-+      if (!q->desc)
-+              return -ENOMEM;
-+
-+      for (i = 0; i < q->ndesc; i++) {
-+              u32 val;
-+
-+              val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
-+              WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
-+      }
-+
-+      airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-+                      FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-+      airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
-+                      FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
-+                                 struct airoha_tx_irq_queue *irq_q,
-+                                 int size)
-+{
-+      int id = irq_q - &eth->q_tx_irq[0];
-+      dma_addr_t dma_addr;
-+
-+      netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
-+                        airoha_qdma_tx_napi_poll);
-+      irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
-+                                     &dma_addr, GFP_KERNEL);
-+      if (!irq_q->q)
-+              return -ENOMEM;
-+
-+      memset(irq_q->q, 0xff, size * sizeof(u32));
-+      irq_q->size = size;
-+      irq_q->eth = eth;
-+
-+      airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
-+      airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-+                      FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
-+      airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
-+                      FIELD_PREP(TX_IRQ_THR_MASK, 1));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_init_tx(struct airoha_eth *eth)
-+{
-+      int i, err;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-+              err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
-+                                            IRQ_QUEUE_LEN(i));
-+              if (err)
-+                      return err;
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-+              err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
-+                                              TX_DSCP_NUM);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
-+{
-+      struct airoha_eth *eth = q->eth;
-+
-+      spin_lock_bh(&q->lock);
-+      while (q->queued) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+
-+              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-+                               DMA_TO_DEVICE);
-+              dev_kfree_skb_any(e->skb);
-+              e->skb = NULL;
-+
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+      }
-+      spin_unlock_bh(&q->lock);
-+}
-+
-+static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
-+{
-+      dma_addr_t dma_addr;
-+      u32 status;
-+      int size;
-+
-+      size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-+      eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                           GFP_KERNEL);
-+      if (!eth->hfwd.desc)
-+              return -ENOMEM;
-+
-+      airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
-+
-+      size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-+      eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                        GFP_KERNEL);
-+      if (!eth->hfwd.q)
-+              return -ENOMEM;
-+
-+      airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
-+
-+      airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
-+                      HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-+                      FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-+      airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-+                      FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-+      airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
-+                      LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-+                      HW_FWD_DESC_NUM_MASK,
-+                      FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
-+                      LMGR_INIT_START);
-+
-+      return read_poll_timeout(airoha_qdma_rr, status,
-+                               !(status & LMGR_INIT_START), USEC_PER_MSEC,
-+                               30 * USEC_PER_MSEC, true, eth,
-+                               REG_LMGR_INIT_CFG);
-+}
-+
-+static void airoha_qdma_init_qos(struct airoha_eth *eth)
-+{
-+      airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-+      airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
-+
-+      airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
-+                        PSE_BUF_ESTIMATE_EN_MASK);
-+
-+      airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_EN_MASK |
-+                      EGRESS_RATE_METER_EQ_RATE_EN_MASK);
-+      /* 2047us x 31 = 63.457ms */
-+      airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_WINDOW_SZ_MASK,
-+                      FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
-+      airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_TIMESLICE_MASK,
-+                      FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
-+
-+      /* ratelimit init */
-+      airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
-+      /* fast-tick 25us */
-+      airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
-+                      FIELD_PREP(GLB_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
-+
-+      airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
-+                      FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
-+                      EGRESS_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
-+
-+      airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
-+                        INGRESS_TRTCM_MODE_MASK);
-+      airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
-+                      FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
-+      airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
-+                      INGRESS_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
-+
-+      airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
-+                      FIELD_PREP(SLA_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
-+}
-+
-+static int airoha_qdma_hw_init(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      /* clear pending irqs */
-+      for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
-+              airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
-+
-+      /* setup irqs */
-+      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-+      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-+      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-+
-+      /* setup irq binding */
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-+              if (!eth->q_tx[i].ndesc)
-+                      continue;
-+
-+              if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-+                      airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
-+                                      TX_RING_IRQ_BLOCKING_CFG_MASK);
-+              else
-+                      airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
-+                                        TX_RING_IRQ_BLOCKING_CFG_MASK);
-+      }
-+
-+      airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
-+                     GLOBAL_CFG_RX_2B_OFFSET_MASK |
-+                     FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
-+                     GLOBAL_CFG_CPU_TXR_RR_MASK |
-+                     GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
-+                     GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
-+                     GLOBAL_CFG_MULTICAST_EN_MASK |
-+                     GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
-+                     GLOBAL_CFG_TX_WB_DONE_MASK |
-+                     FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
-+
-+      airoha_qdma_init_qos(eth);
-+
-+      /* disable qdma rx delay interrupt */
-+      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+              if (!eth->q_rx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
-+                                RX_DELAY_INT_MASK);
-+      }
-+
-+      airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
-+                      TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
-+
-+      return 0;
-+}
-+
-+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
-+{
-+      struct airoha_eth *eth = dev_instance;
-+      u32 intr[ARRAY_SIZE(eth->irqmask)];
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
-+              intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
-+              intr[i] &= eth->irqmask[i];
-+              airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
-+      }
-+
-+      if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
-+              return IRQ_NONE;
-+
-+      if (intr[1] & RX_DONE_INT_MASK) {
-+              airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
-+                                      RX_DONE_INT_MASK);
-+
-+              for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+                      if (!eth->q_rx[i].ndesc)
-+                              continue;
-+
-+                      if (intr[1] & BIT(i))
-+                              napi_schedule(&eth->q_rx[i].napi);
-+              }
-+      }
-+
-+      if (intr[0] & INT_TX_MASK) {
-+              for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-+                      struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
-+                      u32 status, head;
-+
-+                      if (!(intr[0] & TX_DONE_INT_MASK(i)))
-+                              continue;
-+
-+                      airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
-+                                              TX_DONE_INT_MASK(i));
-+
-+                      status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
-+                      head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
-+                      irq_q->head = head % irq_q->size;
-+                      irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-+
-+                      napi_schedule(&eth->q_tx_irq[i].napi);
-+              }
-+      }
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static int airoha_qdma_init(struct airoha_eth *eth)
-+{
-+      int err;
-+
-+      err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
-+                             IRQF_SHARED, KBUILD_MODNAME, eth);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_rx(eth);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_tx(eth);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_hfwd_queues(eth);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_hw_init(eth);
-+      if (err)
-+              return err;
-+
-+      set_bit(DEV_STATE_INITIALIZED, &eth->state);
-+
-+      return 0;
-+}
-+
-+static int airoha_hw_init(struct airoha_eth *eth)
-+{
-+      int err;
-+
-+      /* disable xsi */
-+      reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
-+
-+      reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      msleep(20);
-+      reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      msleep(20);
-+
-+      err = airoha_fe_init(eth);
-+      if (err)
-+              return err;
-+
-+      return airoha_qdma_init(eth);
-+}
-+
-+static void airoha_hw_cleanup(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+              if (!eth->q_rx[i].ndesc)
-+                      continue;
-+
-+              napi_disable(&eth->q_rx[i].napi);
-+              netif_napi_del(&eth->q_rx[i].napi);
-+              airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
-+              if (eth->q_rx[i].page_pool)
-+                      page_pool_destroy(eth->q_rx[i].page_pool);
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-+              napi_disable(&eth->q_tx_irq[i].napi);
-+              netif_napi_del(&eth->q_tx_irq[i].napi);
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-+              if (!eth->q_tx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
-+      }
-+}
-+
-+static void airoha_qdma_start_napi(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
-+              napi_enable(&eth->q_tx_irq[i].napi);
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+              if (!eth->q_rx[i].ndesc)
-+                      continue;
-+
-+              napi_enable(&eth->q_rx[i].napi);
-+      }
-+}
-+
-+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
-+{
-+      struct airoha_eth *eth = port->eth;
-+      u32 val, i = 0;
-+
-+      spin_lock(&port->stats.lock);
-+      u64_stats_update_begin(&port->stats.syncp);
-+
-+      /* TX */
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
-+      port->stats.tx_ok_pkts += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
-+      port->stats.tx_ok_pkts += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
-+      port->stats.tx_ok_bytes += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
-+      port->stats.tx_ok_bytes += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
-+      port->stats.tx_drops += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
-+      port->stats.tx_broadcast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
-+      port->stats.tx_multicast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
-+      port->stats.tx_len[i] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      /* RX */
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
-+      port->stats.rx_ok_pkts += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
-+      port->stats.rx_ok_pkts += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
-+      port->stats.rx_ok_bytes += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
-+      port->stats.rx_ok_bytes += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
-+      port->stats.rx_drops += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
-+      port->stats.rx_broadcast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
-+      port->stats.rx_multicast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
-+      port->stats.rx_errors += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
-+      port->stats.rx_crc_error += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
-+      port->stats.rx_over_errors += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
-+      port->stats.rx_fragment += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
-+      port->stats.rx_jabber += val;
-+
-+      i = 0;
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
-+      port->stats.rx_len[i] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      /* reset mib counters */
-+      airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
-+                    FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
-+
-+      u64_stats_update_end(&port->stats.syncp);
-+      spin_unlock(&port->stats.lock);
-+}
-+
-+static int airoha_dev_open(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->eth;
-+      int err;
-+
-+      netif_tx_start_all_queues(dev);
-+      err = airoha_set_gdm_ports(eth, true);
-+      if (err)
-+              return err;
-+
-+      if (netdev_uses_dsa(dev))
-+              airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
-+                            GDM_STAG_EN_MASK);
-+      else
-+              airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
-+                              GDM_STAG_EN_MASK);
-+
-+      airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-+      airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_stop(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->eth;
-+      int err;
-+
-+      netif_tx_disable(dev);
-+      err = airoha_set_gdm_ports(eth, false);
-+      if (err)
-+              return err;
-+
-+      airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-+      airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      int err;
-+
-+      err = eth_mac_addr(dev, p);
-+      if (err)
-+              return err;
-+
-+      airoha_set_macaddr(port->eth, dev->dev_addr);
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_init(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+
-+      airoha_set_macaddr(port->eth, dev->dev_addr);
-+
-+      return 0;
-+}
-+
-+static void airoha_dev_get_stats64(struct net_device *dev,
-+                                 struct rtnl_link_stats64 *storage)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      unsigned int start;
-+
-+      airoha_update_hw_stats(port);
-+      do {
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              storage->rx_packets = port->stats.rx_ok_pkts;
-+              storage->tx_packets = port->stats.tx_ok_pkts;
-+              storage->rx_bytes = port->stats.rx_ok_bytes;
-+              storage->tx_bytes = port->stats.tx_ok_bytes;
-+              storage->multicast = port->stats.rx_multicast;
-+              storage->rx_errors = port->stats.rx_errors;
-+              storage->rx_dropped = port->stats.rx_drops;
-+              storage->tx_dropped = port->stats.tx_drops;
-+              storage->rx_crc_errors = port->stats.rx_crc_error;
-+              storage->rx_over_errors = port->stats.rx_over_errors;
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-+                                 struct net_device *dev)
-+{
-+      struct skb_shared_info *sinfo = skb_shinfo(skb);
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      u32 msg0 = 0, msg1, len = skb_headlen(skb);
-+      int i, qid = skb_get_queue_mapping(skb);
-+      struct airoha_eth *eth = port->eth;
-+      u32 nr_frags = 1 + sinfo->nr_frags;
-+      struct netdev_queue *txq;
-+      struct airoha_queue *q;
-+      void *data = skb->data;
-+      u16 index;
-+      u8 fport;
-+
-+      if (skb->ip_summed == CHECKSUM_PARTIAL)
-+              msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
-+                      FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
-+                      FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
-+
-+      /* TSO: fill MSS info in tcp checksum field */
-+      if (skb_is_gso(skb)) {
-+              if (skb_cow_head(skb, 0))
-+                      goto error;
-+
-+              if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-+                      __be16 csum = cpu_to_be16(sinfo->gso_size);
-+
-+                      tcp_hdr(skb)->check = (__force __sum16)csum;
-+                      msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
-+              }
-+      }
-+
-+      fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-+      msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
-+             FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
-+
-+      q = &eth->q_tx[qid];
-+      if (WARN_ON_ONCE(!q->ndesc))
-+              goto error;
-+
-+      spin_lock_bh(&q->lock);
-+
-+      txq = netdev_get_tx_queue(dev, qid);
-+      if (q->queued + nr_frags > q->ndesc) {
-+              /* not enough space in the queue */
-+              netif_tx_stop_queue(txq);
-+              spin_unlock_bh(&q->lock);
-+              return NETDEV_TX_BUSY;
-+      }
-+
-+      index = q->head;
-+      for (i = 0; i < nr_frags; i++) {
-+              struct airoha_qdma_desc *desc = &q->desc[index];
-+              struct airoha_queue_entry *e = &q->entry[index];
-+              skb_frag_t *frag = &sinfo->frags[i];
-+              dma_addr_t addr;
-+              u32 val;
-+
-+              addr = dma_map_single(dev->dev.parent, data, len,
-+                                    DMA_TO_DEVICE);
-+              if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
-+                      goto error_unmap;
-+
-+              index = (index + 1) % q->ndesc;
-+
-+              val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
-+              if (i < nr_frags - 1)
-+                      val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
-+              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-+              WRITE_ONCE(desc->addr, cpu_to_le32(addr));
-+              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
-+              WRITE_ONCE(desc->data, cpu_to_le32(val));
-+              WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
-+              WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
-+              WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
-+
-+              e->skb = i ? NULL : skb;
-+              e->dma_addr = addr;
-+              e->dma_len = len;
-+
-+              airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-+                              FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
-+
-+              data = skb_frag_address(frag);
-+              len = skb_frag_size(frag);
-+      }
-+
-+      q->head = index;
-+      q->queued += i;
-+
-+      skb_tx_timestamp(skb);
-+      if (q->ndesc - q->queued < q->free_thr)
-+              netif_tx_stop_queue(txq);
-+
-+      spin_unlock_bh(&q->lock);
-+
-+      return NETDEV_TX_OK;
-+
-+error_unmap:
-+      for (i--; i >= 0; i++)
-+              dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr,
-+                               q->entry[i].dma_len, DMA_TO_DEVICE);
-+
-+      spin_unlock_bh(&q->lock);
-+error:
-+      dev_kfree_skb_any(skb);
-+      dev->stats.tx_dropped++;
-+
-+      return NETDEV_TX_OK;
-+}
-+
-+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
-+                                     struct ethtool_drvinfo *info)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->eth;
-+
-+      strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
-+      strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
-+}
-+
-+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
-+                                       struct ethtool_eth_mac_stats *stats)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      unsigned int start;
-+
-+      airoha_update_hw_stats(port);
-+      do {
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
-+              stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
-+              stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
-+      {    0,    64 },
-+      {   65,   127 },
-+      {  128,   255 },
-+      {  256,   511 },
-+      {  512,  1023 },
-+      { 1024,  1518 },
-+      { 1519, 10239 },
-+      {},
-+};
-+
-+static void
-+airoha_ethtool_get_rmon_stats(struct net_device *dev,
-+                            struct ethtool_rmon_stats *stats,
-+                            const struct ethtool_rmon_hist_range **ranges)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_hw_stats *hw_stats = &port->stats;
-+      unsigned int start;
-+
-+      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-+                   ARRAY_SIZE(hw_stats->tx_len) + 1);
-+      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-+                   ARRAY_SIZE(hw_stats->rx_len) + 1);
-+
-+      *ranges = airoha_ethtool_rmon_ranges;
-+      airoha_update_hw_stats(port);
-+      do {
-+              int i;
-+
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              stats->fragments = hw_stats->rx_fragment;
-+              stats->jabbers = hw_stats->rx_jabber;
-+              for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
-+                   i++) {
-+                      stats->hist[i] = hw_stats->rx_len[i];
-+                      stats->hist_tx[i] = hw_stats->tx_len[i];
-+              }
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static const struct net_device_ops airoha_netdev_ops = {
-+      .ndo_init               = airoha_dev_init,
-+      .ndo_open               = airoha_dev_open,
-+      .ndo_stop               = airoha_dev_stop,
-+      .ndo_start_xmit         = airoha_dev_xmit,
-+      .ndo_get_stats64        = airoha_dev_get_stats64,
-+      .ndo_set_mac_address    = airoha_dev_set_macaddr,
-+};
-+
-+static const struct ethtool_ops airoha_ethtool_ops = {
-+      .get_drvinfo            = airoha_ethtool_get_drvinfo,
-+      .get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
-+      .get_rmon_stats         = airoha_ethtool_get_rmon_stats,
-+};
-+
-+static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
-+{
-+      const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-+      struct airoha_gdm_port *port;
-+      struct net_device *dev;
-+      int err, index;
-+      u32 id;
-+
-+      if (!id_ptr) {
-+              dev_err(eth->dev, "missing gdm port id\n");
-+              return -EINVAL;
-+      }
-+
-+      id = be32_to_cpup(id_ptr);
-+      index = id - 1;
-+
-+      if (!id || id > ARRAY_SIZE(eth->ports)) {
-+              dev_err(eth->dev, "invalid gdm port id: %d\n", id);
-+              return -EINVAL;
-+      }
-+
-+      if (eth->ports[index]) {
-+              dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
-+              return -EINVAL;
-+      }
-+
-+      dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
-+                                    AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
-+      if (!dev) {
-+              dev_err(eth->dev, "alloc_etherdev failed\n");
-+              return -ENOMEM;
-+      }
-+
-+      dev->netdev_ops = &airoha_netdev_ops;
-+      dev->ethtool_ops = &airoha_ethtool_ops;
-+      dev->max_mtu = AIROHA_MAX_MTU;
-+      dev->watchdog_timeo = 5 * HZ;
-+      dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-+                         NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
-+                         NETIF_F_SG | NETIF_F_TSO;
-+      dev->features |= dev->hw_features;
-+      dev->dev.of_node = np;
-+      SET_NETDEV_DEV(dev, eth->dev);
-+
-+      err = of_get_ethdev_address(np, dev);
-+      if (err) {
-+              if (err == -EPROBE_DEFER)
-+                      return err;
-+
-+              eth_hw_addr_random(dev);
-+              dev_info(eth->dev, "generated random MAC address %pM\n",
-+                       dev->dev_addr);
-+      }
-+
-+      port = netdev_priv(dev);
-+      u64_stats_init(&port->stats.syncp);
-+      spin_lock_init(&port->stats.lock);
-+      port->dev = dev;
-+      port->eth = eth;
-+      port->id = id;
-+      eth->ports[index] = port;
-+
-+      return register_netdev(dev);
-+}
-+
-+static int airoha_probe(struct platform_device *pdev)
-+{
-+      struct device_node *np;
-+      struct airoha_eth *eth;
-+      int i, err;
-+
-+      eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-+      if (!eth)
-+              return -ENOMEM;
-+
-+      eth->dev = &pdev->dev;
-+
-+      err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
-+      if (err) {
-+              dev_err(eth->dev, "failed configuring DMA mask\n");
-+              return err;
-+      }
-+
-+      eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
-+      if (IS_ERR(eth->fe_regs))
-+              return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-+                                   "failed to iomap fe regs\n");
-+
-+      eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
-+      if (IS_ERR(eth->qdma_regs))
-+              return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
-+                                   "failed to iomap qdma regs\n");
-+
-+      eth->rsts[0].id = "fe";
-+      eth->rsts[1].id = "pdma";
-+      eth->rsts[2].id = "qdma";
-+      err = devm_reset_control_bulk_get_exclusive(eth->dev,
-+                                                  ARRAY_SIZE(eth->rsts),
-+                                                  eth->rsts);
-+      if (err) {
-+              dev_err(eth->dev, "failed to get bulk reset lines\n");
-+              return err;
-+      }
-+
-+      eth->xsi_rsts[0].id = "xsi-mac";
-+      eth->xsi_rsts[1].id = "hsi0-mac";
-+      eth->xsi_rsts[2].id = "hsi1-mac";
-+      eth->xsi_rsts[3].id = "hsi-mac";
-+      eth->xsi_rsts[4].id = "xfp-mac";
-+      err = devm_reset_control_bulk_get_exclusive(eth->dev,
-+                                                  ARRAY_SIZE(eth->xsi_rsts),
-+                                                  eth->xsi_rsts);
-+      if (err) {
-+              dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
-+              return err;
-+      }
-+
-+      spin_lock_init(&eth->irq_lock);
-+      eth->irq = platform_get_irq(pdev, 0);
-+      if (eth->irq < 0)
-+              return eth->irq;
-+
-+      eth->napi_dev = alloc_netdev_dummy(0);
-+      if (!eth->napi_dev)
-+              return -ENOMEM;
-+
-+      /* Enable threaded NAPI by default */
-+      eth->napi_dev->threaded = true;
-+      strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
-+      platform_set_drvdata(pdev, eth);
-+
-+      err = airoha_hw_init(eth);
-+      if (err)
-+              goto error;
-+
-+      airoha_qdma_start_napi(eth);
-+      for_each_child_of_node(pdev->dev.of_node, np) {
-+              if (!of_device_is_compatible(np, "airoha,eth-mac"))
-+                      continue;
-+
-+              if (!of_device_is_available(np))
-+                      continue;
-+
-+              err = airoha_alloc_gdm_port(eth, np);
-+              if (err) {
-+                      of_node_put(np);
-+                      goto error;
-+              }
-+      }
-+
-+      return 0;
-+
-+error:
-+      airoha_hw_cleanup(eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-+              struct airoha_gdm_port *port = eth->ports[i];
-+
-+              if (port && port->dev->reg_state == NETREG_REGISTERED)
-+                      unregister_netdev(port->dev);
-+      }
-+      free_netdev(eth->napi_dev);
-+      platform_set_drvdata(pdev, NULL);
-+
-+      return err;
-+}
-+
-+static void airoha_remove(struct platform_device *pdev)
-+{
-+      struct airoha_eth *eth = platform_get_drvdata(pdev);
-+      int i;
-+
-+      airoha_hw_cleanup(eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-+              struct airoha_gdm_port *port = eth->ports[i];
-+
-+              if (!port)
-+                      continue;
-+
-+              airoha_dev_stop(port->dev);
-+              unregister_netdev(port->dev);
-+      }
-+      free_netdev(eth->napi_dev);
-+
-+      platform_set_drvdata(pdev, NULL);
-+}
-+
-+static const struct of_device_id of_airoha_match[] = {
-+      { .compatible = "airoha,en7581-eth" },
-+      { /* sentinel */ }
-+};
-+
-+static struct platform_driver airoha_driver = {
-+      .probe = airoha_probe,
-+      .remove_new = airoha_remove,
-+      .driver = {
-+              .name = KBUILD_MODNAME,
-+              .of_match_table = of_airoha_match,
-+      },
-+};
-+module_platform_driver(airoha_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
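For readers skimming the dropped driver source above: airoha_update_hw_stats() builds each 64-bit MIB counter from a pair of 32-bit FE registers, adding the high word shifted by 32 to the low word. A minimal sketch of that split-counter pattern, using a hypothetical helper name (read_split_counter() is not part of the driver):

static u64 read_split_counter(struct airoha_eth *eth, u32 reg_hi, u32 reg_lo)
{
        /* high word first, shifted into the upper 32 bits */
        u64 cnt = (u64)airoha_fe_rr(eth, reg_hi) << 32;

        /* then the low word completes the 64-bit value */
        return cnt + airoha_fe_rr(eth, reg_lo);
}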
diff --git a/target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch b/target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch
deleted file mode 100644 (file)
index 3f2d577..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-From 1f038d5897fe6b439039fc28420842abcc0d126b Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 17 Jul 2024 10:15:46 +0200
-Subject: [PATCH] net: airoha: fix error branch in airoha_dev_xmit and
- airoha_set_gdm_ports
-
-Fix error case management in airoha_dev_xmit routine since we need to
-DMA unmap pending buffers starting from q->head.
-Moreover fix a typo in error case branch in airoha_set_gdm_ports
-routine.
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/b628871bc8ae4861b5e2ab4db90aaf373cbb7cee.1721203880.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -977,7 +977,7 @@ static int airoha_set_gdm_ports(struct a
-       return 0;
- error:
--      for (i--; i >= 0; i++)
-+      for (i--; i >= 0; i--)
-               airoha_set_gdm_port(eth, port_list[i], false);
-       return err;
-@@ -2432,9 +2432,11 @@ static netdev_tx_t airoha_dev_xmit(struc
-       return NETDEV_TX_OK;
- error_unmap:
--      for (i--; i >= 0; i++)
--              dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr,
--                               q->entry[i].dma_len, DMA_TO_DEVICE);
-+      for (i--; i >= 0; i--) {
-+              index = (q->head + i) % q->ndesc;
-+              dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
-+                               q->entry[index].dma_len, DMA_TO_DEVICE);
-+      }
-       spin_unlock_bh(&q->lock);
- error:
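A minimal sketch of the corrected unwind path from the hunk above, with identifiers as in the driver: the loop counter now decrements, and the slots to unmap are computed from q->head rather than from index 0.

error_unmap:
        for (i--; i >= 0; i--) {
                u16 index = (q->head + i) % q->ndesc;

                dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
                                 q->entry[index].dma_len, DMA_TO_DEVICE);
        }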
diff --git a/target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch b/target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch
deleted file mode 100644 (file)
index 4c8b361..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-From 4e076ff6ad5302c015617da30d877b4cdcbdf613 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 17 Jul 2024 10:47:19 +0200
-Subject: [PATCH] net: airoha: Fix NULL pointer dereference in
- airoha_qdma_cleanup_rx_queue()
-
-Move page_pool_get_dma_dir() inside the while loop of
-airoha_qdma_cleanup_rx_queue routine in order to avoid possible NULL
-pointer dereference if airoha_qdma_init_rx_queue() fails before
-properly allocating the page_pool pointer.
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/7330a41bba720c33abc039955f6172457a3a34f0.1721205981.git.lorenzo@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1586,7 +1586,6 @@ static int airoha_qdma_init_rx_queue(str
- static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
- {
--      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-       struct airoha_eth *eth = q->eth;
-       while (q->queued) {
-@@ -1594,7 +1593,7 @@ static void airoha_qdma_cleanup_rx_queue
-               struct page *page = virt_to_head_page(e->buf);
-               dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
--                                      dir);
-+                                      page_pool_get_dma_dir(q->page_pool));
-               page_pool_put_full_page(q->page_pool, page, false);
-               q->tail = (q->tail + 1) % q->ndesc;
-               q->queued--;
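With the change above applied, the RX cleanup loop only queries the DMA direction while entries are still queued, so a q->page_pool left NULL by a failed airoha_qdma_init_rx_queue() is never dereferenced. A condensed view of the loop as it reads after the fix (identifiers as in the driver):

        while (q->queued) {
                struct airoha_queue_entry *e = &q->entry[q->tail];
                struct page *page = virt_to_head_page(e->buf);

                /* direction lookup only happens for queued entries, i.e.
                 * only when the page pool was successfully allocated
                 */
                dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
                                        page_pool_get_dma_dir(q->page_pool));
                page_pool_put_full_page(q->page_pool, page, false);
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }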
diff --git a/target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch b/target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch
deleted file mode 100644 (file)
index 15385be..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-From 39a9c25bcdfb5e88995841c47439b74cac74a527 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 19 Jul 2024 22:38:31 +0200
-Subject: [PATCH] net: airoha: Fix MBI_RX_AGE_SEL_MASK definition
-
-Fix copy-paste error in MBI_RX_AGE_SEL_MASK macro definition
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/d27d0465be1bff3369e886e5f10c4d37fefc4934.1721419930.git.lorenzo@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -249,7 +249,7 @@
- #define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
- #define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
--#define MBI_RX_AGE_SEL_MASK           GENMASK(18, 17)
-+#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25)
- #define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
- #define REG_GDM3_FWD_CFG              GDM3_BASE
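The copy-paste error meant the RX age-select field reused the TX field's bit range in REG_GDM2_CHN_RLS; after the fix the two fields are distinct (values as in the hunk above):

#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25) /* was GENMASK(18, 17) */
#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)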
diff --git a/target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch b/target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch
deleted file mode 100644 (file)
index 3649e1c..0000000
+++ /dev/null
@@ -1,553 +0,0 @@
-From 16874d1cf3818a5804cded8eaff634122b1d6c7c Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:03 +0200
-Subject: [PATCH 1/8] net: airoha: Introduce airoha_qdma struct
-
-Introduce airoha_qdma struct and move qdma IO register mapping in
-airoha_qdma. This is a preliminary patch to enable both QDMA controllers
-available on EN7581 SoC.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/7df163bdc72ee29c3d27a0cbf54522ffeeafe53c.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 197 ++++++++++++---------
- 1 file changed, 112 insertions(+), 85 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -18,6 +18,7 @@
- #include <uapi/linux/ppp_defs.h>
- #define AIROHA_MAX_NUM_GDM_PORTS      1
-+#define AIROHA_MAX_NUM_QDMA           1
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
- #define AIROHA_MAX_MTU                        2000
-@@ -782,6 +783,10 @@ struct airoha_hw_stats {
-       u64 rx_len[7];
- };
-+struct airoha_qdma {
-+      void __iomem *regs;
-+};
-+
- struct airoha_gdm_port {
-       struct net_device *dev;
-       struct airoha_eth *eth;
-@@ -794,8 +799,6 @@ struct airoha_eth {
-       struct device *dev;
-       unsigned long state;
--
--      void __iomem *qdma_regs;
-       void __iomem *fe_regs;
-       /* protect concurrent irqmask accesses */
-@@ -806,6 +809,7 @@ struct airoha_eth {
-       struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-       struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-+      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-       struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-       struct net_device *napi_dev;
-@@ -850,16 +854,16 @@ static u32 airoha_rmw(void __iomem *base
- #define airoha_fe_clear(eth, offset, val)                     \
-       airoha_rmw((eth)->fe_regs, (offset), (val), 0)
--#define airoha_qdma_rr(eth, offset)                           \
--      airoha_rr((eth)->qdma_regs, (offset))
--#define airoha_qdma_wr(eth, offset, val)                      \
--      airoha_wr((eth)->qdma_regs, (offset), (val))
--#define airoha_qdma_rmw(eth, offset, mask, val)                       \
--      airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
--#define airoha_qdma_set(eth, offset, val)                     \
--      airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
--#define airoha_qdma_clear(eth, offset, val)                   \
--      airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
-+#define airoha_qdma_rr(qdma, offset)                          \
-+      airoha_rr((qdma)->regs, (offset))
-+#define airoha_qdma_wr(qdma, offset, val)                     \
-+      airoha_wr((qdma)->regs, (offset), (val))
-+#define airoha_qdma_rmw(qdma, offset, mask, val)              \
-+      airoha_rmw((qdma)->regs, (offset), (mask), (val))
-+#define airoha_qdma_set(qdma, offset, val)                    \
-+      airoha_rmw((qdma)->regs, (offset), 0, (val))
-+#define airoha_qdma_clear(qdma, offset, val)                  \
-+      airoha_rmw((qdma)->regs, (offset), (val), 0)
- static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
-                                   u32 clear, u32 set)
-@@ -873,11 +877,12 @@ static void airoha_qdma_set_irqmask(stru
-       eth->irqmask[index] &= ~clear;
-       eth->irqmask[index] |= set;
--      airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
-+      airoha_qdma_wr(&eth->qdma[0], REG_INT_ENABLE(index),
-+                     eth->irqmask[index]);
-       /* Read irq_enable register in order to guarantee the update above
-        * completes in the spinlock critical section.
-        */
--      airoha_qdma_rr(eth, REG_INT_ENABLE(index));
-+      airoha_qdma_rr(&eth->qdma[0], REG_INT_ENABLE(index));
-       spin_unlock_irqrestore(&eth->irq_lock, flags);
- }
-@@ -1383,6 +1388,7 @@ static int airoha_fe_init(struct airoha_
- static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
- {
-       enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_qdma *qdma = &q->eth->qdma[0];
-       struct airoha_eth *eth = q->eth;
-       int qid = q - &eth->q_rx[0];
-       int nframes = 0;
-@@ -1420,7 +1426,8 @@ static int airoha_qdma_fill_rx_queue(str
-               WRITE_ONCE(desc->msg2, 0);
-               WRITE_ONCE(desc->msg3, 0);
--              airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-+              airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
-+                              RX_RING_CPU_IDX_MASK,
-                               FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-       }
-@@ -1529,7 +1536,8 @@ static int airoha_qdma_rx_napi_poll(stru
- }
- static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
--                                   struct airoha_queue *q, int ndesc)
-+                                   struct airoha_queue *q,
-+                                   struct airoha_qdma *qdma, int ndesc)
- {
-       const struct page_pool_params pp_params = {
-               .order = 0,
-@@ -1569,14 +1577,15 @@ static int airoha_qdma_init_rx_queue(str
-       netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
--      airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
--      airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
-+      airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
-+                      RX_RING_SIZE_MASK,
-                       FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
-       thr = clamp(ndesc >> 3, 1, 32);
--      airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
-+      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
-                       FIELD_PREP(RX_RING_THR_MASK, thr));
--      airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-+      airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-                       FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
-       airoha_qdma_fill_rx_queue(q);
-@@ -1600,7 +1609,8 @@ static void airoha_qdma_cleanup_rx_queue
-       }
- }
--static int airoha_qdma_init_rx(struct airoha_eth *eth)
-+static int airoha_qdma_init_rx(struct airoha_eth *eth,
-+                             struct airoha_qdma *qdma)
- {
-       int i;
-@@ -1613,7 +1623,7 @@ static int airoha_qdma_init_rx(struct ai
-               }
-               err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
--                                              RX_DSCP_NUM(i));
-+                                              qdma, RX_DSCP_NUM(i));
-               if (err)
-                       return err;
-       }
-@@ -1624,11 +1634,13 @@ static int airoha_qdma_init_rx(struct ai
- static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_tx_irq_queue *irq_q;
-+      struct airoha_qdma *qdma;
-       struct airoha_eth *eth;
-       int id, done = 0;
-       irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-       eth = irq_q->eth;
-+      qdma = &eth->qdma[0];
-       id = irq_q - &eth->q_tx_irq[0];
-       while (irq_q->queued > 0 && done < budget) {
-@@ -1698,9 +1710,9 @@ static int airoha_qdma_tx_napi_poll(stru
-               int i, len = done >> 7;
-               for (i = 0; i < len; i++)
--                      airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
-+                      airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-                                       IRQ_CLEAR_LEN_MASK, 0x80);
--              airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
-+              airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-                               IRQ_CLEAR_LEN_MASK, (done & 0x7f));
-       }
-@@ -1712,7 +1724,8 @@ static int airoha_qdma_tx_napi_poll(stru
- }
- static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
--                                   struct airoha_queue *q, int size)
-+                                   struct airoha_queue *q,
-+                                   struct airoha_qdma *qdma, int size)
- {
-       int i, qid = q - &eth->q_tx[0];
-       dma_addr_t dma_addr;
-@@ -1739,10 +1752,10 @@ static int airoha_qdma_init_tx_queue(str
-               WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
-       }
--      airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
--      airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-+      airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-                       FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
--      airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
-+      airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
-                       FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
-       return 0;
-@@ -1750,7 +1763,7 @@ static int airoha_qdma_init_tx_queue(str
- static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
-                                  struct airoha_tx_irq_queue *irq_q,
--                                 int size)
-+                                 struct airoha_qdma *qdma, int size)
- {
-       int id = irq_q - &eth->q_tx_irq[0];
-       dma_addr_t dma_addr;
-@@ -1766,29 +1779,30 @@ static int airoha_qdma_tx_irq_init(struc
-       irq_q->size = size;
-       irq_q->eth = eth;
--      airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
--      airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-+      airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-                       FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
--      airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
-+      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
-                       FIELD_PREP(TX_IRQ_THR_MASK, 1));
-       return 0;
- }
--static int airoha_qdma_init_tx(struct airoha_eth *eth)
-+static int airoha_qdma_init_tx(struct airoha_eth *eth,
-+                             struct airoha_qdma *qdma)
- {
-       int i, err;
-       for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-               err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
--                                            IRQ_QUEUE_LEN(i));
-+                                            qdma, IRQ_QUEUE_LEN(i));
-               if (err)
-                       return err;
-       }
-       for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-               err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
--                                              TX_DSCP_NUM);
-+                                              qdma, TX_DSCP_NUM);
-               if (err)
-                       return err;
-       }
-@@ -1815,7 +1829,8 @@ static void airoha_qdma_cleanup_tx_queue
-       spin_unlock_bh(&q->lock);
- }
--static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
-+static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
-+                                      struct airoha_qdma *qdma)
- {
-       dma_addr_t dma_addr;
-       u32 status;
-@@ -1827,7 +1842,7 @@ static int airoha_qdma_init_hfwd_queues(
-       if (!eth->hfwd.desc)
-               return -ENOMEM;
--      airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
-+      airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-       size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-       eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-@@ -1835,14 +1850,14 @@ static int airoha_qdma_init_hfwd_queues(
-       if (!eth->hfwd.q)
-               return -ENOMEM;
--      airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
-+      airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
--      airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
-+      airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
-                       HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-                       FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
--      airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-+      airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-                       FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
--      airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
-+      airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
-                       LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-                       HW_FWD_DESC_NUM_MASK,
-                       FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
-@@ -1850,67 +1865,69 @@ static int airoha_qdma_init_hfwd_queues(
-       return read_poll_timeout(airoha_qdma_rr, status,
-                                !(status & LMGR_INIT_START), USEC_PER_MSEC,
--                               30 * USEC_PER_MSEC, true, eth,
-+                               30 * USEC_PER_MSEC, true, qdma,
-                                REG_LMGR_INIT_CFG);
- }
--static void airoha_qdma_init_qos(struct airoha_eth *eth)
-+static void airoha_qdma_init_qos(struct airoha_eth *eth,
-+                               struct airoha_qdma *qdma)
- {
--      airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
--      airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
-+      airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-+      airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
--      airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
-+      airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
-                         PSE_BUF_ESTIMATE_EN_MASK);
--      airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
-+      airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
-                       EGRESS_RATE_METER_EN_MASK |
-                       EGRESS_RATE_METER_EQ_RATE_EN_MASK);
-       /* 2047us x 31 = 63.457ms */
--      airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
-+      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-                       EGRESS_RATE_METER_WINDOW_SZ_MASK,
-                       FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
--      airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
-+      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-                       EGRESS_RATE_METER_TIMESLICE_MASK,
-                       FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
-       /* ratelimit init */
--      airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
-+      airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
-       /* fast-tick 25us */
--      airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
-+      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
-                       FIELD_PREP(GLB_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
-+      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
-                       FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
--      airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
--      airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
-+      airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
-                       FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
-+      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
-                       EGRESS_SLOW_TICK_RATIO_MASK,
-                       FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
--      airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
--      airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
-+      airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
-                         INGRESS_TRTCM_MODE_MASK);
--      airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
-+      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
-                       FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
--      airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
-+      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
-                       INGRESS_SLOW_TICK_RATIO_MASK,
-                       FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
--      airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
--      airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
-+      airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
-                       FIELD_PREP(SLA_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
-+      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
-                       FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
- }
--static int airoha_qdma_hw_init(struct airoha_eth *eth)
-+static int airoha_qdma_hw_init(struct airoha_eth *eth,
-+                             struct airoha_qdma *qdma)
- {
-       int i;
-       /* clear pending irqs */
-       for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
--              airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
-+              airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-       /* setup irqs */
-       airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-@@ -1923,14 +1940,14 @@ static int airoha_qdma_hw_init(struct ai
-                       continue;
-               if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
--                      airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
-+                      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
-                                       TX_RING_IRQ_BLOCKING_CFG_MASK);
-               else
--                      airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
-+                      airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
-                                         TX_RING_IRQ_BLOCKING_CFG_MASK);
-       }
--      airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
-+      airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
-                      GLOBAL_CFG_RX_2B_OFFSET_MASK |
-                      FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
-                      GLOBAL_CFG_CPU_TXR_RR_MASK |
-@@ -1941,18 +1958,18 @@ static int airoha_qdma_hw_init(struct ai
-                      GLOBAL_CFG_TX_WB_DONE_MASK |
-                      FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
--      airoha_qdma_init_qos(eth);
-+      airoha_qdma_init_qos(eth, qdma);
-       /* disable qdma rx delay interrupt */
-       for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-               if (!eth->q_rx[i].ndesc)
-                       continue;
--              airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
-+              airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
-                                 RX_DELAY_INT_MASK);
-       }
--      airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
-+      airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
-                       TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
-       return 0;
-@@ -1962,12 +1979,14 @@ static irqreturn_t airoha_irq_handler(in
- {
-       struct airoha_eth *eth = dev_instance;
-       u32 intr[ARRAY_SIZE(eth->irqmask)];
-+      struct airoha_qdma *qdma;
-       int i;
-+      qdma = &eth->qdma[0];
-       for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
--              intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
-+              intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
-               intr[i] &= eth->irqmask[i];
--              airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
-+              airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-       }
-       if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
-@@ -1997,7 +2016,7 @@ static irqreturn_t airoha_irq_handler(in
-                       airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
-                                               TX_DONE_INT_MASK(i));
--                      status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
-+                      status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
-                       head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
-                       irq_q->head = head % irq_q->size;
-                       irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-@@ -2011,6 +2030,7 @@ static irqreturn_t airoha_irq_handler(in
- static int airoha_qdma_init(struct airoha_eth *eth)
- {
-+      struct airoha_qdma *qdma = &eth->qdma[0];
-       int err;
-       err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
-@@ -2018,19 +2038,19 @@ static int airoha_qdma_init(struct airoh
-       if (err)
-               return err;
--      err = airoha_qdma_init_rx(eth);
-+      err = airoha_qdma_init_rx(eth, qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_init_tx(eth);
-+      err = airoha_qdma_init_tx(eth, qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_init_hfwd_queues(eth);
-+      err = airoha_qdma_init_hfwd_queues(eth, qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_hw_init(eth);
-+      err = airoha_qdma_hw_init(eth, qdma);
-       if (err)
-               return err;
-@@ -2263,8 +2283,9 @@ static int airoha_dev_open(struct net_de
-               airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
-                               GDM_STAG_EN_MASK);
--      airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
--      airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
-+      airoha_qdma_set(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
-+                      GLOBAL_CFG_TX_DMA_EN_MASK |
-+                      GLOBAL_CFG_RX_DMA_EN_MASK);
-       return 0;
- }
-@@ -2280,8 +2301,9 @@ static int airoha_dev_stop(struct net_de
-       if (err)
-               return err;
--      airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
--      airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
-+      airoha_qdma_clear(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
-+                        GLOBAL_CFG_TX_DMA_EN_MASK |
-+                        GLOBAL_CFG_RX_DMA_EN_MASK);
-       return 0;
- }
-@@ -2341,6 +2363,7 @@ static netdev_tx_t airoha_dev_xmit(struc
-       struct airoha_eth *eth = port->eth;
-       u32 nr_frags = 1 + sinfo->nr_frags;
-       struct netdev_queue *txq;
-+      struct airoha_qdma *qdma;
-       struct airoha_queue *q;
-       void *data = skb->data;
-       u16 index;
-@@ -2368,6 +2391,7 @@ static netdev_tx_t airoha_dev_xmit(struc
-       msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
-              FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
-+      qdma = &eth->qdma[0];
-       q = &eth->q_tx[qid];
-       if (WARN_ON_ONCE(!q->ndesc))
-               goto error;
-@@ -2412,7 +2436,8 @@ static netdev_tx_t airoha_dev_xmit(struc
-               e->dma_addr = addr;
-               e->dma_len = len;
--              airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-+              airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
-+                              TX_RING_CPU_IDX_MASK,
-                               FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
-               data = skb_frag_address(frag);
-@@ -2614,9 +2639,11 @@ static int airoha_probe(struct platform_
-               return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-                                    "failed to iomap fe regs\n");
--      eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
--      if (IS_ERR(eth->qdma_regs))
--              return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
-+      eth->qdma[0].regs = devm_platform_ioremap_resource_byname(pdev,
-+                                                                "qdma0");
-+      if (IS_ERR(eth->qdma[0].regs))
-+              return dev_err_probe(eth->dev,
-+                                   PTR_ERR(eth->qdma[0].regs),
-                                    "failed to iomap qdma regs\n");
-       eth->rsts[0].id = "fe";
diff --git a/target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch b/target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch
deleted file mode 100644
index 853a785..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-From 245c7bc86b198e5ec227eba6b582da73cb0721c8 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:04 +0200
-Subject: [PATCH 2/8] net: airoha: Move airoha_queues in airoha_qdma
-
-QDMA controllers available in EN7581 SoC have independent tx/rx hw queues
-so move them in airoha_queues structure.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/795fc4797bffbf7f0a1351308aa9bf0e65b5126e.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 126 +++++++++++----------
- 1 file changed, 65 insertions(+), 61 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -785,6 +785,17 @@ struct airoha_hw_stats {
- struct airoha_qdma {
-       void __iomem *regs;
-+
-+      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-+
-+      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-+      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-+
-+      /* descriptor and packet buffers for qdma hw forward */
-+      struct {
-+              void *desc;
-+              void *q;
-+      } hfwd;
- };
- struct airoha_gdm_port {
-@@ -809,20 +820,10 @@ struct airoha_eth {
-       struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-       struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
--      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
--      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
--
-       struct net_device *napi_dev;
--      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
--      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
--
--      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
--      /* descriptor and packet buffers for qdma hw forward */
--      struct {
--              void *desc;
--              void *q;
--      } hfwd;
-+      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-+      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
- };
- static u32 airoha_rr(void __iomem *base, u32 offset)
-@@ -1390,7 +1391,7 @@ static int airoha_qdma_fill_rx_queue(str
-       enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-       struct airoha_qdma *qdma = &q->eth->qdma[0];
-       struct airoha_eth *eth = q->eth;
--      int qid = q - &eth->q_rx[0];
-+      int qid = q - &qdma->q_rx[0];
-       int nframes = 0;
-       while (q->queued < q->ndesc - 1) {
-@@ -1457,8 +1458,9 @@ static int airoha_qdma_get_gdm_port(stru
- static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
- {
-       enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_qdma *qdma = &q->eth->qdma[0];
-       struct airoha_eth *eth = q->eth;
--      int qid = q - &eth->q_rx[0];
-+      int qid = q - &qdma->q_rx[0];
-       int done = 0;
-       while (done < budget) {
-@@ -1550,7 +1552,7 @@ static int airoha_qdma_init_rx_queue(str
-               .dev = eth->dev,
-               .napi = &q->napi,
-       };
--      int qid = q - &eth->q_rx[0], thr;
-+      int qid = q - &qdma->q_rx[0], thr;
-       dma_addr_t dma_addr;
-       q->buf_size = PAGE_SIZE / 2;
-@@ -1614,7 +1616,7 @@ static int airoha_qdma_init_rx(struct ai
- {
-       int i;
--      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-               int err;
-               if (!(RX_DONE_INT_MASK & BIT(i))) {
-@@ -1622,7 +1624,7 @@ static int airoha_qdma_init_rx(struct ai
-                       continue;
-               }
--              err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
-+              err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
-                                               qdma, RX_DSCP_NUM(i));
-               if (err)
-                       return err;
-@@ -1641,7 +1643,7 @@ static int airoha_qdma_tx_napi_poll(stru
-       irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-       eth = irq_q->eth;
-       qdma = &eth->qdma[0];
--      id = irq_q - &eth->q_tx_irq[0];
-+      id = irq_q - &qdma->q_tx_irq[0];
-       while (irq_q->queued > 0 && done < budget) {
-               u32 qid, last, val = irq_q->q[irq_q->head];
-@@ -1658,10 +1660,10 @@ static int airoha_qdma_tx_napi_poll(stru
-               last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-               qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
--              if (qid >= ARRAY_SIZE(eth->q_tx))
-+              if (qid >= ARRAY_SIZE(qdma->q_tx))
-                       continue;
--              q = &eth->q_tx[qid];
-+              q = &qdma->q_tx[qid];
-               if (!q->ndesc)
-                       continue;
-@@ -1727,7 +1729,7 @@ static int airoha_qdma_init_tx_queue(str
-                                    struct airoha_queue *q,
-                                    struct airoha_qdma *qdma, int size)
- {
--      int i, qid = q - &eth->q_tx[0];
-+      int i, qid = q - &qdma->q_tx[0];
-       dma_addr_t dma_addr;
-       spin_lock_init(&q->lock);
-@@ -1765,7 +1767,7 @@ static int airoha_qdma_tx_irq_init(struc
-                                  struct airoha_tx_irq_queue *irq_q,
-                                  struct airoha_qdma *qdma, int size)
- {
--      int id = irq_q - &eth->q_tx_irq[0];
-+      int id = irq_q - &qdma->q_tx_irq[0];
-       dma_addr_t dma_addr;
-       netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
-@@ -1793,15 +1795,15 @@ static int airoha_qdma_init_tx(struct ai
- {
-       int i, err;
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
--              err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-+              err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
-                                             qdma, IRQ_QUEUE_LEN(i));
-               if (err)
-                       return err;
-       }
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
--              err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
-                                               qdma, TX_DSCP_NUM);
-               if (err)
-                       return err;
-@@ -1837,17 +1839,17 @@ static int airoha_qdma_init_hfwd_queues(
-       int size;
-       size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
--      eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                           GFP_KERNEL);
--      if (!eth->hfwd.desc)
-+      qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                            GFP_KERNEL);
-+      if (!qdma->hfwd.desc)
-               return -ENOMEM;
-       airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-       size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
--      eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                        GFP_KERNEL);
--      if (!eth->hfwd.q)
-+      qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                         GFP_KERNEL);
-+      if (!qdma->hfwd.q)
-               return -ENOMEM;
-       airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
-@@ -1935,8 +1937,8 @@ static int airoha_qdma_hw_init(struct ai
-       airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-       /* setup irq binding */
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
--              if (!eth->q_tx[i].ndesc)
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-                       continue;
-               if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-@@ -1961,8 +1963,8 @@ static int airoha_qdma_hw_init(struct ai
-       airoha_qdma_init_qos(eth, qdma);
-       /* disable qdma rx delay interrupt */
--      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
--              if (!eth->q_rx[i].ndesc)
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-                       continue;
-               airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
-@@ -1996,18 +1998,18 @@ static irqreturn_t airoha_irq_handler(in
-               airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
-                                       RX_DONE_INT_MASK);
--              for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
--                      if (!eth->q_rx[i].ndesc)
-+              for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+                      if (!qdma->q_rx[i].ndesc)
-                               continue;
-                       if (intr[1] & BIT(i))
--                              napi_schedule(&eth->q_rx[i].napi);
-+                              napi_schedule(&qdma->q_rx[i].napi);
-               }
-       }
-       if (intr[0] & INT_TX_MASK) {
--              for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
--                      struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
-+              for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-+                      struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
-                       u32 status, head;
-                       if (!(intr[0] & TX_DONE_INT_MASK(i)))
-@@ -2021,7 +2023,7 @@ static irqreturn_t airoha_irq_handler(in
-                       irq_q->head = head % irq_q->size;
-                       irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
--                      napi_schedule(&eth->q_tx_irq[i].napi);
-+                      napi_schedule(&qdma->q_tx_irq[i].napi);
-               }
-       }
-@@ -2080,44 +2082,46 @@ static int airoha_hw_init(struct airoha_
- static void airoha_hw_cleanup(struct airoha_eth *eth)
- {
-+      struct airoha_qdma *qdma = &eth->qdma[0];
-       int i;
--      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
--              if (!eth->q_rx[i].ndesc)
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-                       continue;
--              napi_disable(&eth->q_rx[i].napi);
--              netif_napi_del(&eth->q_rx[i].napi);
--              airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
--              if (eth->q_rx[i].page_pool)
--                      page_pool_destroy(eth->q_rx[i].page_pool);
-+              napi_disable(&qdma->q_rx[i].napi);
-+              netif_napi_del(&qdma->q_rx[i].napi);
-+              airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
-+              if (qdma->q_rx[i].page_pool)
-+                      page_pool_destroy(qdma->q_rx[i].page_pool);
-       }
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
--              napi_disable(&eth->q_tx_irq[i].napi);
--              netif_napi_del(&eth->q_tx_irq[i].napi);
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-+              napi_disable(&qdma->q_tx_irq[i].napi);
-+              netif_napi_del(&qdma->q_tx_irq[i].napi);
-       }
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
--              if (!eth->q_tx[i].ndesc)
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-                       continue;
--              airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
-+              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-       }
- }
- static void airoha_qdma_start_napi(struct airoha_eth *eth)
- {
-+      struct airoha_qdma *qdma = &eth->qdma[0];
-       int i;
--      for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
--              napi_enable(&eth->q_tx_irq[i].napi);
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-+              napi_enable(&qdma->q_tx_irq[i].napi);
--      for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
--              if (!eth->q_rx[i].ndesc)
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-                       continue;
--              napi_enable(&eth->q_rx[i].napi);
-+              napi_enable(&qdma->q_rx[i].napi);
-       }
- }
-@@ -2392,7 +2396,7 @@ static netdev_tx_t airoha_dev_xmit(struc
-              FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
-       qdma = &eth->qdma[0];
--      q = &eth->q_tx[qid];
-+      q = &qdma->q_tx[qid];
-       if (WARN_ON_ONCE(!q->ndesc))
-               goto error;
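Aside for readers skimming the deleted patches: the hunks above repeatedly derive a ring index from a queue's address inside the controller's embedded arrays, e.g. qid = q - &qdma->q_rx[0]. A minimal standalone C sketch of that pointer-subtraction idiom (stand-in struct names and sizes, userspace only, not code from the driver):

#include <stdio.h>

#define NUM_RX_RING 4

struct queue {
	int ndesc;
};

struct qdma {
	struct queue q_rx[NUM_RX_RING];
};

int main(void)
{
	struct qdma qdma = { 0 };
	/* A callback typically only receives a pointer to one element. */
	struct queue *q = &qdma.q_rx[2];

	/* Subtracting the array base from an element pointer yields the
	 * element index, so the index never has to be stored in the
	 * queue structure itself.
	 */
	int qid = (int)(q - &qdma.q_rx[0]);

	printf("qid = %d\n", qid); /* prints 2 */
	return 0;
}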
diff --git a/target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch b/target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch
deleted file mode 100644
index 9f05ad4..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-From 19e47fc2aeda3a657c4f64144ffd6e65f7a66601 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:05 +0200
-Subject: [PATCH 3/8] net: airoha: Move irq_mask in airoha_qdma structure
-
-QDMA controllers have independent irq lines, so move irqmask in
-airoha_qdma structure. This is a preliminary patch to support multiple
-QDMA controllers.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/1c8a06e8be605278a7b2f3cd8ac06e74bf5ebf2b.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
- 1 file changed, 42 insertions(+), 42 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -786,6 +786,11 @@ struct airoha_hw_stats {
- struct airoha_qdma {
-       void __iomem *regs;
-+      /* protect concurrent irqmask accesses */
-+      spinlock_t irq_lock;
-+      u32 irqmask[QDMA_INT_REG_MAX];
-+      int irq;
-+
-       struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-       struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-@@ -812,11 +817,6 @@ struct airoha_eth {
-       unsigned long state;
-       void __iomem *fe_regs;
--      /* protect concurrent irqmask accesses */
--      spinlock_t irq_lock;
--      u32 irqmask[QDMA_INT_REG_MAX];
--      int irq;
--
-       struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-       struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-@@ -866,38 +866,37 @@ static u32 airoha_rmw(void __iomem *base
- #define airoha_qdma_clear(qdma, offset, val)                  \
-       airoha_rmw((qdma)->regs, (offset), (val), 0)
--static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
-+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
-                                   u32 clear, u32 set)
- {
-       unsigned long flags;
--      if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
-+      if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
-               return;
--      spin_lock_irqsave(&eth->irq_lock, flags);
-+      spin_lock_irqsave(&qdma->irq_lock, flags);
--      eth->irqmask[index] &= ~clear;
--      eth->irqmask[index] |= set;
--      airoha_qdma_wr(&eth->qdma[0], REG_INT_ENABLE(index),
--                     eth->irqmask[index]);
-+      qdma->irqmask[index] &= ~clear;
-+      qdma->irqmask[index] |= set;
-+      airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
-       /* Read irq_enable register in order to guarantee the update above
-        * completes in the spinlock critical section.
-        */
--      airoha_qdma_rr(&eth->qdma[0], REG_INT_ENABLE(index));
-+      airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
--      spin_unlock_irqrestore(&eth->irq_lock, flags);
-+      spin_unlock_irqrestore(&qdma->irq_lock, flags);
- }
--static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
-+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
-                                  u32 mask)
- {
--      airoha_qdma_set_irqmask(eth, index, 0, mask);
-+      airoha_qdma_set_irqmask(qdma, index, 0, mask);
- }
--static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
-+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
-                                   u32 mask)
- {
--      airoha_qdma_set_irqmask(eth, index, mask, 0);
-+      airoha_qdma_set_irqmask(qdma, index, mask, 0);
- }
- static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
-@@ -1522,7 +1521,7 @@ static int airoha_qdma_rx_process(struct
- static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
--      struct airoha_eth *eth = q->eth;
-+      struct airoha_qdma *qdma = &q->eth->qdma[0];
-       int cur, done = 0;
-       do {
-@@ -1531,7 +1530,7 @@ static int airoha_qdma_rx_napi_poll(stru
-       } while (cur && done < budget);
-       if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
-+              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
-                                      RX_DONE_INT_MASK);
-       return done;
-@@ -1719,7 +1718,7 @@ static int airoha_qdma_tx_napi_poll(stru
-       }
-       if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
-+              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
-                                      TX_DONE_INT_MASK(id));
-       return done;
-@@ -1928,13 +1927,13 @@ static int airoha_qdma_hw_init(struct ai
-       int i;
-       /* clear pending irqs */
--      for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
-+      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-       /* setup irqs */
--      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
--      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
--      airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-       /* setup irq binding */
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-@@ -1980,14 +1979,13 @@ static int airoha_qdma_hw_init(struct ai
- static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
- {
-       struct airoha_eth *eth = dev_instance;
--      u32 intr[ARRAY_SIZE(eth->irqmask)];
--      struct airoha_qdma *qdma;
-+      struct airoha_qdma *qdma = &eth->qdma[0];
-+      u32 intr[ARRAY_SIZE(qdma->irqmask)];
-       int i;
--      qdma = &eth->qdma[0];
--      for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
-+      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
-               intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
--              intr[i] &= eth->irqmask[i];
-+              intr[i] &= qdma->irqmask[i];
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-       }
-@@ -1995,7 +1993,7 @@ static irqreturn_t airoha_irq_handler(in
-               return IRQ_NONE;
-       if (intr[1] & RX_DONE_INT_MASK) {
--              airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
-+              airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
-                                       RX_DONE_INT_MASK);
-               for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-@@ -2015,7 +2013,7 @@ static irqreturn_t airoha_irq_handler(in
-                       if (!(intr[0] & TX_DONE_INT_MASK(i)))
-                               continue;
--                      airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
-+                      airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
-                                               TX_DONE_INT_MASK(i));
-                       status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
-@@ -2030,12 +2028,18 @@ static irqreturn_t airoha_irq_handler(in
-       return IRQ_HANDLED;
- }
--static int airoha_qdma_init(struct airoha_eth *eth)
-+static int airoha_qdma_init(struct platform_device *pdev,
-+                          struct airoha_eth *eth)
- {
-       struct airoha_qdma *qdma = &eth->qdma[0];
-       int err;
--      err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
-+      spin_lock_init(&qdma->irq_lock);
-+      qdma->irq = platform_get_irq(pdev, 0);
-+      if (qdma->irq < 0)
-+              return qdma->irq;
-+
-+      err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
-                              IRQF_SHARED, KBUILD_MODNAME, eth);
-       if (err)
-               return err;
-@@ -2061,7 +2065,8 @@ static int airoha_qdma_init(struct airoh
-       return 0;
- }
--static int airoha_hw_init(struct airoha_eth *eth)
-+static int airoha_hw_init(struct platform_device *pdev,
-+                        struct airoha_eth *eth)
- {
-       int err;
-@@ -2077,7 +2082,7 @@ static int airoha_hw_init(struct airoha_
-       if (err)
-               return err;
--      return airoha_qdma_init(eth);
-+      return airoha_qdma_init(pdev, eth);
- }
- static void airoha_hw_cleanup(struct airoha_eth *eth)
-@@ -2674,11 +2679,6 @@ static int airoha_probe(struct platform_
-               return err;
-       }
--      spin_lock_init(&eth->irq_lock);
--      eth->irq = platform_get_irq(pdev, 0);
--      if (eth->irq < 0)
--              return eth->irq;
--
-       eth->napi_dev = alloc_netdev_dummy(0);
-       if (!eth->napi_dev)
-               return -ENOMEM;
-@@ -2688,7 +2688,7 @@ static int airoha_probe(struct platform_
-       strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
-       platform_set_drvdata(pdev, eth);
--      err = airoha_hw_init(eth);
-+      err = airoha_hw_init(pdev, eth);
-       if (err)
-               goto error;
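Aside: patch 010-03 above moves the cached interrupt-enable masks and their lock into the per-controller structure. The core pattern is a clear/set read-modify-write on the cached mask under a lock, then a register write. A userspace C sketch of that pattern, where a pthread mutex stands in for the kernel's spin_lock_irqsave() and the register write is reduced to a comment:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define QDMA_INT_REG_MAX 4

struct qdma {
	pthread_mutex_t irq_lock;            /* protects irqmask[] */
	uint32_t irqmask[QDMA_INT_REG_MAX];  /* cached enable bits */
};

static void qdma_set_irqmask(struct qdma *qdma, int index,
			     uint32_t clear, uint32_t set)
{
	if (index >= QDMA_INT_REG_MAX)
		return;

	pthread_mutex_lock(&qdma->irq_lock);
	qdma->irqmask[index] &= ~clear;
	qdma->irqmask[index] |= set;
	/* The driver would now write irqmask[index] to the hardware
	 * enable register and read it back to flush the update.
	 */
	pthread_mutex_unlock(&qdma->irq_lock);
}

int main(void)
{
	struct qdma qdma = { .irq_lock = PTHREAD_MUTEX_INITIALIZER };

	qdma_set_irqmask(&qdma, 0, 0, 0xf); /* enable four sources */
	qdma_set_irqmask(&qdma, 0, 0x2, 0); /* disable one of them */
	printf("irqmask[0] = 0x%x\n", (unsigned)qdma.irqmask[0]); /* prints 0xd */
	return 0;
}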
diff --git a/target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch b/target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch
deleted file mode 100644
index b73fc34..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-From 9a2500ab22f059e596942172a8e4a60ae8243ce4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:06 +0200
-Subject: [PATCH 4/8] net: airoha: Add airoha_qdma pointer in
- airoha_tx_irq_queue/airoha_queue structures
-
-Move airoha_eth pointer in airoha_qdma structure from
-airoha_tx_irq_queue/airoha_queue ones. This is a preliminary patch to
-introduce support for multi-QDMA controllers available on EN7581.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/074565b82fd0ceefe66e186f21133d825dbd48eb.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
- 1 file changed, 41 insertions(+), 43 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -728,7 +728,7 @@ struct airoha_queue_entry {
- };
- struct airoha_queue {
--      struct airoha_eth *eth;
-+      struct airoha_qdma *qdma;
-       /* protect concurrent queue accesses */
-       spinlock_t lock;
-@@ -747,7 +747,7 @@ struct airoha_queue {
- };
- struct airoha_tx_irq_queue {
--      struct airoha_eth *eth;
-+      struct airoha_qdma *qdma;
-       struct napi_struct napi;
-       u32 *q;
-@@ -784,6 +784,7 @@ struct airoha_hw_stats {
- };
- struct airoha_qdma {
-+      struct airoha_eth *eth;
-       void __iomem *regs;
-       /* protect concurrent irqmask accesses */
-@@ -1388,8 +1389,8 @@ static int airoha_fe_init(struct airoha_
- static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
- {
-       enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
--      struct airoha_qdma *qdma = &q->eth->qdma[0];
--      struct airoha_eth *eth = q->eth;
-+      struct airoha_qdma *qdma = q->qdma;
-+      struct airoha_eth *eth = qdma->eth;
-       int qid = q - &qdma->q_rx[0];
-       int nframes = 0;
-@@ -1457,8 +1458,8 @@ static int airoha_qdma_get_gdm_port(stru
- static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
- {
-       enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
--      struct airoha_qdma *qdma = &q->eth->qdma[0];
--      struct airoha_eth *eth = q->eth;
-+      struct airoha_qdma *qdma = q->qdma;
-+      struct airoha_eth *eth = qdma->eth;
-       int qid = q - &qdma->q_rx[0];
-       int done = 0;
-@@ -1521,7 +1522,6 @@ static int airoha_qdma_rx_process(struct
- static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
--      struct airoha_qdma *qdma = &q->eth->qdma[0];
-       int cur, done = 0;
-       do {
-@@ -1530,14 +1530,13 @@ static int airoha_qdma_rx_napi_poll(stru
-       } while (cur && done < budget);
-       if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
-+              airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
-                                      RX_DONE_INT_MASK);
-       return done;
- }
--static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
--                                   struct airoha_queue *q,
-+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
-                                    struct airoha_qdma *qdma, int ndesc)
- {
-       const struct page_pool_params pp_params = {
-@@ -1548,15 +1547,16 @@ static int airoha_qdma_init_rx_queue(str
-               .dma_dir = DMA_FROM_DEVICE,
-               .max_len = PAGE_SIZE,
-               .nid = NUMA_NO_NODE,
--              .dev = eth->dev,
-+              .dev = qdma->eth->dev,
-               .napi = &q->napi,
-       };
-+      struct airoha_eth *eth = qdma->eth;
-       int qid = q - &qdma->q_rx[0], thr;
-       dma_addr_t dma_addr;
-       q->buf_size = PAGE_SIZE / 2;
-       q->ndesc = ndesc;
--      q->eth = eth;
-+      q->qdma = qdma;
-       q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-                               GFP_KERNEL);
-@@ -1596,7 +1596,7 @@ static int airoha_qdma_init_rx_queue(str
- static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
- {
--      struct airoha_eth *eth = q->eth;
-+      struct airoha_eth *eth = q->qdma->eth;
-       while (q->queued) {
-               struct airoha_queue_entry *e = &q->entry[q->tail];
-@@ -1610,8 +1610,7 @@ static void airoha_qdma_cleanup_rx_queue
-       }
- }
--static int airoha_qdma_init_rx(struct airoha_eth *eth,
--                             struct airoha_qdma *qdma)
-+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
- {
-       int i;
-@@ -1623,8 +1622,8 @@ static int airoha_qdma_init_rx(struct ai
-                       continue;
-               }
--              err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
--                                              qdma, RX_DSCP_NUM(i));
-+              err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
-+                                              RX_DSCP_NUM(i));
-               if (err)
-                       return err;
-       }
-@@ -1640,9 +1639,9 @@ static int airoha_qdma_tx_napi_poll(stru
-       int id, done = 0;
-       irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
--      eth = irq_q->eth;
--      qdma = &eth->qdma[0];
-+      qdma = irq_q->qdma;
-       id = irq_q - &qdma->q_tx_irq[0];
-+      eth = qdma->eth;
-       while (irq_q->queued > 0 && done < budget) {
-               u32 qid, last, val = irq_q->q[irq_q->head];
-@@ -1724,16 +1723,16 @@ static int airoha_qdma_tx_napi_poll(stru
-       return done;
- }
--static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
--                                   struct airoha_queue *q,
-+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
-                                    struct airoha_qdma *qdma, int size)
- {
-+      struct airoha_eth *eth = qdma->eth;
-       int i, qid = q - &qdma->q_tx[0];
-       dma_addr_t dma_addr;
-       spin_lock_init(&q->lock);
-       q->ndesc = size;
--      q->eth = eth;
-+      q->qdma = qdma;
-       q->free_thr = 1 + MAX_SKB_FRAGS;
-       q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-@@ -1762,11 +1761,11 @@ static int airoha_qdma_init_tx_queue(str
-       return 0;
- }
--static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
--                                 struct airoha_tx_irq_queue *irq_q,
-+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
-                                  struct airoha_qdma *qdma, int size)
- {
-       int id = irq_q - &qdma->q_tx_irq[0];
-+      struct airoha_eth *eth = qdma->eth;
-       dma_addr_t dma_addr;
-       netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
-@@ -1778,7 +1777,7 @@ static int airoha_qdma_tx_irq_init(struc
-       memset(irq_q->q, 0xff, size * sizeof(u32));
-       irq_q->size = size;
--      irq_q->eth = eth;
-+      irq_q->qdma = qdma;
-       airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
-       airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-@@ -1789,21 +1788,20 @@ static int airoha_qdma_tx_irq_init(struc
-       return 0;
- }
--static int airoha_qdma_init_tx(struct airoha_eth *eth,
--                             struct airoha_qdma *qdma)
-+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
- {
-       int i, err;
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
--              err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
--                                            qdma, IRQ_QUEUE_LEN(i));
-+              err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
-+                                            IRQ_QUEUE_LEN(i));
-               if (err)
-                       return err;
-       }
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
--                                              qdma, TX_DSCP_NUM);
-+              err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
-+                                              TX_DSCP_NUM);
-               if (err)
-                       return err;
-       }
-@@ -1813,7 +1811,7 @@ static int airoha_qdma_init_tx(struct ai
- static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
- {
--      struct airoha_eth *eth = q->eth;
-+      struct airoha_eth *eth = q->qdma->eth;
-       spin_lock_bh(&q->lock);
-       while (q->queued) {
-@@ -1830,9 +1828,9 @@ static void airoha_qdma_cleanup_tx_queue
-       spin_unlock_bh(&q->lock);
- }
--static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
--                                      struct airoha_qdma *qdma)
-+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
- {
-+      struct airoha_eth *eth = qdma->eth;
-       dma_addr_t dma_addr;
-       u32 status;
-       int size;
-@@ -1870,8 +1868,7 @@ static int airoha_qdma_init_hfwd_queues(
-                                REG_LMGR_INIT_CFG);
- }
--static void airoha_qdma_init_qos(struct airoha_eth *eth,
--                               struct airoha_qdma *qdma)
-+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
- {
-       airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-       airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
-@@ -1921,8 +1918,7 @@ static void airoha_qdma_init_qos(struct
-                       FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
- }
--static int airoha_qdma_hw_init(struct airoha_eth *eth,
--                             struct airoha_qdma *qdma)
-+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
- {
-       int i;
-@@ -1959,7 +1955,7 @@ static int airoha_qdma_hw_init(struct ai
-                      GLOBAL_CFG_TX_WB_DONE_MASK |
-                      FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
--      airoha_qdma_init_qos(eth, qdma);
-+      airoha_qdma_init_qos(qdma);
-       /* disable qdma rx delay interrupt */
-       for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-@@ -2035,6 +2031,8 @@ static int airoha_qdma_init(struct platf
-       int err;
-       spin_lock_init(&qdma->irq_lock);
-+      qdma->eth = eth;
-+
-       qdma->irq = platform_get_irq(pdev, 0);
-       if (qdma->irq < 0)
-               return qdma->irq;
-@@ -2044,19 +2042,19 @@ static int airoha_qdma_init(struct platf
-       if (err)
-               return err;
--      err = airoha_qdma_init_rx(eth, qdma);
-+      err = airoha_qdma_init_rx(qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_init_tx(eth, qdma);
-+      err = airoha_qdma_init_tx(qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_init_hfwd_queues(eth, qdma);
-+      err = airoha_qdma_init_hfwd_queues(qdma);
-       if (err)
-               return err;
--      err = airoha_qdma_hw_init(eth, qdma);
-+      err = airoha_qdma_hw_init(qdma);
-       if (err)
-               return err;
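Aside: the NAPI poll functions in patch 010-04 recover their owning queue with container_of(napi, struct airoha_queue, napi) and then reach shared state through the new back-pointer (q->qdma->eth). A self-contained userspace C sketch of that idiom, with a local container_of() definition and hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel macro: given a pointer to a member,
 * recover a pointer to the structure that embeds it.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct {
	int weight;
};

struct eth {
	const char *name;
};

struct qdma {
	struct eth *eth;            /* controller to device back-pointer */
};

struct queue {
	struct qdma *qdma;          /* queue to controller back-pointer */
	struct napi_struct napi;    /* embedded member handed to callbacks */
};

/* A poll callback only receives the embedded napi pointer. */
static void poll(struct napi_struct *napi)
{
	struct queue *q = container_of(napi, struct queue, napi);

	printf("polling a queue of %s\n", q->qdma->eth->name);
}

int main(void)
{
	struct eth eth = { .name = "eth0" };
	struct qdma qdma = { .eth = &eth };
	struct queue q = { .qdma = &qdma };

	poll(&q.napi);
	return 0;
}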
diff --git a/target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch b/target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch
deleted file mode 100644
index 9cabd10..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-From e3d6bfdfc0aeb8c1d7965413b1050ec07f9761e5 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:07 +0200
-Subject: [PATCH 5/8] net: airoha: Use qdma pointer as private structure in
- airoha_irq_handler routine
-
-This is a preliminary patch to support multi-QDMA controllers.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/1e40c3cb973881c0eb3c3c247c78550da62054ab.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 7 +++----
- 1 file changed, 3 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1974,8 +1974,7 @@ static int airoha_qdma_hw_init(struct ai
- static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
- {
--      struct airoha_eth *eth = dev_instance;
--      struct airoha_qdma *qdma = &eth->qdma[0];
-+      struct airoha_qdma *qdma = dev_instance;
-       u32 intr[ARRAY_SIZE(qdma->irqmask)];
-       int i;
-@@ -1985,7 +1984,7 @@ static irqreturn_t airoha_irq_handler(in
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-       }
--      if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
-+      if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
-               return IRQ_NONE;
-       if (intr[1] & RX_DONE_INT_MASK) {
-@@ -2038,7 +2037,7 @@ static int airoha_qdma_init(struct platf
-               return qdma->irq;
-       err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
--                             IRQF_SHARED, KBUILD_MODNAME, eth);
-+                             IRQF_SHARED, KBUILD_MODNAME, qdma);
-       if (err)
-               return err;
diff --git a/target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch b/target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch
deleted file mode 100644
index ebc7318..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-From e618447cf492d04415007336eec025fae6e9a2ea Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:08 +0200
-Subject: [PATCH 6/8] net: airoha: Allow mapping IO region for multiple qdma
- controllers
-
-Map MMIO regions of both qdma controllers available on EN7581 SoC.
-Run airoha_hw_cleanup routine for both QDMA controllers available on
-EN7581 SoC removing airoha_eth module or in airoha_probe error path.
-This is a preliminary patch to support multi-QDMA controllers.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/a734ae608da14b67ae749b375d880dbbc70868ea.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 56 ++++++++++++----------
- 1 file changed, 32 insertions(+), 24 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2024,15 +2024,25 @@ static irqreturn_t airoha_irq_handler(in
- }
- static int airoha_qdma_init(struct platform_device *pdev,
--                          struct airoha_eth *eth)
-+                          struct airoha_eth *eth,
-+                          struct airoha_qdma *qdma)
- {
--      struct airoha_qdma *qdma = &eth->qdma[0];
--      int err;
-+      int err, id = qdma - &eth->qdma[0];
-+      const char *res;
-       spin_lock_init(&qdma->irq_lock);
-       qdma->eth = eth;
--      qdma->irq = platform_get_irq(pdev, 0);
-+      res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
-+      if (!res)
-+              return -ENOMEM;
-+
-+      qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
-+      if (IS_ERR(qdma->regs))
-+              return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
-+                                   "failed to iomap qdma%d regs\n", id);
-+
-+      qdma->irq = platform_get_irq(pdev, 4 * id);
-       if (qdma->irq < 0)
-               return qdma->irq;
-@@ -2053,19 +2063,13 @@ static int airoha_qdma_init(struct platf
-       if (err)
-               return err;
--      err = airoha_qdma_hw_init(qdma);
--      if (err)
--              return err;
--
--      set_bit(DEV_STATE_INITIALIZED, &eth->state);
--
--      return 0;
-+      return airoha_qdma_hw_init(qdma);
- }
- static int airoha_hw_init(struct platform_device *pdev,
-                         struct airoha_eth *eth)
- {
--      int err;
-+      int err, i;
-       /* disable xsi */
-       reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
-@@ -2079,12 +2083,19 @@ static int airoha_hw_init(struct platfor
-       if (err)
-               return err;
--      return airoha_qdma_init(pdev, eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-+              err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
-+              if (err)
-+                      return err;
-+      }
-+
-+      set_bit(DEV_STATE_INITIALIZED, &eth->state);
-+
-+      return 0;
- }
--static void airoha_hw_cleanup(struct airoha_eth *eth)
-+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
- {
--      struct airoha_qdma *qdma = &eth->qdma[0];
-       int i;
-       for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-@@ -2645,13 +2656,6 @@ static int airoha_probe(struct platform_
-               return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-                                    "failed to iomap fe regs\n");
--      eth->qdma[0].regs = devm_platform_ioremap_resource_byname(pdev,
--                                                                "qdma0");
--      if (IS_ERR(eth->qdma[0].regs))
--              return dev_err_probe(eth->dev,
--                                   PTR_ERR(eth->qdma[0].regs),
--                                   "failed to iomap qdma regs\n");
--
-       eth->rsts[0].id = "fe";
-       eth->rsts[1].id = "pdma";
-       eth->rsts[2].id = "qdma";
-@@ -2707,7 +2711,9 @@ static int airoha_probe(struct platform_
-       return 0;
- error:
--      airoha_hw_cleanup(eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_hw_cleanup(&eth->qdma[i]);
-+
-       for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-               struct airoha_gdm_port *port = eth->ports[i];
-@@ -2725,7 +2731,9 @@ static void airoha_remove(struct platfor
-       struct airoha_eth *eth = platform_get_drvdata(pdev);
-       int i;
--      airoha_hw_cleanup(eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_hw_cleanup(&eth->qdma[i]);
-+
-       for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-               struct airoha_gdm_port *port = eth->ports[i];
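Aside: patch 010-06 derives each controller's identity from its slot in the qdma[] array and uses it to build the MMIO resource name ("qdma%d") and the matching interrupt index (platform_get_irq(pdev, 4 * id)). A userspace C sketch of that per-controller setup loop, with stand-in types; the four-interrupts-per-controller spacing is taken from the patch, everything else is illustrative:

#include <stdio.h>

#define MAX_NUM_QDMA 2

struct qdma {
	char res_name[8];  /* "qdma0", "qdma1", ... */
	int irq_index;     /* index into the device's interrupt list */
};

struct eth {
	struct qdma qdma[MAX_NUM_QDMA];
};

static void qdma_init(struct eth *eth, struct qdma *qdma)
{
	/* The controller id falls out of the element's position in the array. */
	int id = (int)(qdma - &eth->qdma[0]);

	snprintf(qdma->res_name, sizeof(qdma->res_name), "qdma%d", id);
	qdma->irq_index = 4 * id;  /* mirrors platform_get_irq(pdev, 4 * id) */
}

int main(void)
{
	struct eth eth = { 0 };

	for (int i = 0; i < MAX_NUM_QDMA; i++) {
		qdma_init(&eth, &eth.qdma[i]);
		printf("%s -> irq index %d\n",
		       eth.qdma[i].res_name, eth.qdma[i].irq_index);
	}
	return 0;
}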
diff --git a/target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch b/target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch
deleted file mode 100644
index c9a99f1..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-From 160231e34b8e9512ba20530f3e68fb0ac499af87 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:09 +0200
-Subject: [PATCH 7/8] net: airoha: Start all qdma NAPIs in airoha_probe()
-
-This is a preliminary patch to support multi-QDMA controllers.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/b51cf69c94d8cbc81e0a0b35587f024d01e6d9c0.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2122,9 +2122,8 @@ static void airoha_hw_cleanup(struct air
-       }
- }
--static void airoha_qdma_start_napi(struct airoha_eth *eth)
-+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
- {
--      struct airoha_qdma *qdma = &eth->qdma[0];
-       int i;
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-@@ -2693,7 +2692,9 @@ static int airoha_probe(struct platform_
-       if (err)
-               goto error;
--      airoha_qdma_start_napi(eth);
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_qdma_start_napi(&eth->qdma[i]);
-+
-       for_each_child_of_node(pdev->dev.of_node, np) {
-               if (!of_device_is_compatible(np, "airoha,eth-mac"))
-                       continue;
diff --git a/target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch b/target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch
deleted file mode 100644
index 1e89cf1..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-From 9304640f2f78147dddf97a5ea01502ae175e41d9 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 16:35:10 +0200
-Subject: [PATCH 8/8] net: airoha: Link the gdm port to the selected qdma
- controller
-
-Link the running gdm port to the qdma controller used to connect with
-the CPU. Moreover, load all QDMA controllers available on EN7581 SoC.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/95b515df34ba4727f7ae5b14a1d0462cceec84ff.1722522582.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 37 +++++++++++-----------
- 1 file changed, 19 insertions(+), 18 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -18,7 +18,7 @@
- #include <uapi/linux/ppp_defs.h>
- #define AIROHA_MAX_NUM_GDM_PORTS      1
--#define AIROHA_MAX_NUM_QDMA           1
-+#define AIROHA_MAX_NUM_QDMA           2
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
- #define AIROHA_MAX_MTU                        2000
-@@ -805,8 +805,8 @@ struct airoha_qdma {
- };
- struct airoha_gdm_port {
-+      struct airoha_qdma *qdma;
-       struct net_device *dev;
--      struct airoha_eth *eth;
-       int id;
-       struct airoha_hw_stats stats;
-@@ -2139,7 +2139,7 @@ static void airoha_qdma_start_napi(struc
- static void airoha_update_hw_stats(struct airoha_gdm_port *port)
- {
--      struct airoha_eth *eth = port->eth;
-+      struct airoha_eth *eth = port->qdma->eth;
-       u32 val, i = 0;
-       spin_lock(&port->stats.lock);
-@@ -2284,22 +2284,22 @@ static void airoha_update_hw_stats(struc
- static int airoha_dev_open(struct net_device *dev)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_eth *eth = port->eth;
-+      struct airoha_qdma *qdma = port->qdma;
-       int err;
-       netif_tx_start_all_queues(dev);
--      err = airoha_set_gdm_ports(eth, true);
-+      err = airoha_set_gdm_ports(qdma->eth, true);
-       if (err)
-               return err;
-       if (netdev_uses_dsa(dev))
--              airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
-+              airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-                             GDM_STAG_EN_MASK);
-       else
--              airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
-+              airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-                               GDM_STAG_EN_MASK);
--      airoha_qdma_set(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
-+      airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
-                       GLOBAL_CFG_TX_DMA_EN_MASK |
-                       GLOBAL_CFG_RX_DMA_EN_MASK);
-@@ -2309,15 +2309,15 @@ static int airoha_dev_open(struct net_de
- static int airoha_dev_stop(struct net_device *dev)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_eth *eth = port->eth;
-+      struct airoha_qdma *qdma = port->qdma;
-       int err;
-       netif_tx_disable(dev);
--      err = airoha_set_gdm_ports(eth, false);
-+      err = airoha_set_gdm_ports(qdma->eth, false);
-       if (err)
-               return err;
--      airoha_qdma_clear(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
-+      airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
-                         GLOBAL_CFG_TX_DMA_EN_MASK |
-                         GLOBAL_CFG_RX_DMA_EN_MASK);
-@@ -2333,7 +2333,7 @@ static int airoha_dev_set_macaddr(struct
-       if (err)
-               return err;
--      airoha_set_macaddr(port->eth, dev->dev_addr);
-+      airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
-       return 0;
- }
-@@ -2342,7 +2342,7 @@ static int airoha_dev_init(struct net_de
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      airoha_set_macaddr(port->eth, dev->dev_addr);
-+      airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
-       return 0;
- }
-@@ -2376,10 +2376,9 @@ static netdev_tx_t airoha_dev_xmit(struc
-       struct airoha_gdm_port *port = netdev_priv(dev);
-       u32 msg0 = 0, msg1, len = skb_headlen(skb);
-       int i, qid = skb_get_queue_mapping(skb);
--      struct airoha_eth *eth = port->eth;
-+      struct airoha_qdma *qdma = port->qdma;
-       u32 nr_frags = 1 + sinfo->nr_frags;
-       struct netdev_queue *txq;
--      struct airoha_qdma *qdma;
-       struct airoha_queue *q;
-       void *data = skb->data;
-       u16 index;
-@@ -2407,7 +2406,6 @@ static netdev_tx_t airoha_dev_xmit(struc
-       msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
-              FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
--      qdma = &eth->qdma[0];
-       q = &qdma->q_tx[qid];
-       if (WARN_ON_ONCE(!q->ndesc))
-               goto error;
-@@ -2490,7 +2488,7 @@ static void airoha_ethtool_get_drvinfo(s
-                                      struct ethtool_drvinfo *info)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_eth *eth = port->eth;
-+      struct airoha_eth *eth = port->qdma->eth;
-       strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
-       strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
-@@ -2571,6 +2569,7 @@ static int airoha_alloc_gdm_port(struct
- {
-       const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-       struct airoha_gdm_port *port;
-+      struct airoha_qdma *qdma;
-       struct net_device *dev;
-       int err, index;
-       u32 id;
-@@ -2600,6 +2599,7 @@ static int airoha_alloc_gdm_port(struct
-               return -ENOMEM;
-       }
-+      qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
-       dev->netdev_ops = &airoha_netdev_ops;
-       dev->ethtool_ops = &airoha_ethtool_ops;
-       dev->max_mtu = AIROHA_MAX_MTU;
-@@ -2609,6 +2609,7 @@ static int airoha_alloc_gdm_port(struct
-                          NETIF_F_SG | NETIF_F_TSO;
-       dev->features |= dev->hw_features;
-       dev->dev.of_node = np;
-+      dev->irq = qdma->irq;
-       SET_NETDEV_DEV(dev, eth->dev);
-       err = of_get_ethdev_address(np, dev);
-@@ -2624,8 +2625,8 @@ static int airoha_alloc_gdm_port(struct
-       port = netdev_priv(dev);
-       u64_stats_init(&port->stats.syncp);
-       spin_lock_init(&port->stats.lock);
-+      port->qdma = qdma;
-       port->dev = dev;
--      port->eth = eth;
-       port->id = id;
-       eth->ports[index] = port;
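A quick illustration of the round-robin mapping above (not part of the removed patch): with AIROHA_MAX_NUM_QDMA raised to 2, each GDM port is attached to a QDMA controller by its port index:

    /* qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA], AIROHA_MAX_NUM_QDMA == 2 */
    /* index 0 -> eth->qdma[0] */
    /* index 1 -> eth->qdma[1] */
    /* index 2 -> eth->qdma[0]  (wraps around) */

Since AIROHA_MAX_NUM_GDM_PORTS is still 1 at this point, only index 0 is used in practice; the modulo simply keeps the mapping valid if more GDM ports are enabled later.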
diff --git a/target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch b/target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch
deleted file mode 100644 (file)
index ed25ccb..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-From 63a796b4988c3dca83176a534890b510d44f105a Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 3 Aug 2024 17:50:50 +0200
-Subject: [PATCH] net: airoha: honor reset return value in airoha_hw_init()
-
-Take into account return value from reset_control_bulk_assert and
-reset_control_bulk_deassert routines in airoha_hw_init().
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/f49dc04a87653e0155f4fab3e3eb584785c8ad6a.1722699555.git.lorenzo@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 16 ++++++++++++----
- 1 file changed, 12 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2072,13 +2072,21 @@ static int airoha_hw_init(struct platfor
-       int err, i;
-       /* disable xsi */
--      reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
-+      err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
-+                                      eth->xsi_rsts);
-+      if (err)
-+              return err;
-+
-+      err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      if (err)
-+              return err;
--      reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
--      msleep(20);
--      reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
-       msleep(20);
-+      err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      if (err)
-+              return err;
-+      msleep(20);
-       err = airoha_fe_init(eth);
-       if (err)
-               return err;
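For context, a minimal self-contained sketch of the reset_control_bulk error-handling pattern the fix above enforces (illustrative only, not part of the removed patch; the reset line names are assumptions):

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/reset.h>

    static int example_hw_reset(struct device *dev)
    {
            /* the ids are placeholders for the SoC's reset lines */
            struct reset_control_bulk_data rsts[] = {
                    { .id = "fe" }, { .id = "pdma" }, { .id = "qdma" },
            };
            int err;

            err = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(rsts), rsts);
            if (err)
                    return err;

            /* propagate assert/deassert errors instead of ignoring them */
            err = reset_control_bulk_assert(ARRAY_SIZE(rsts), rsts);
            if (err)
                    return err;

            msleep(20);

            return reset_control_bulk_deassert(ARRAY_SIZE(rsts), rsts);
    }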
diff --git a/target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch b/target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch
deleted file mode 100644 (file)
index da23955..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-From 812a2751e827fa1eb01f3bd268b4d74c23f4226a Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 21 Aug 2024 09:30:14 +0200
-Subject: [PATCH] net: airoha: configure hw mac address according to the port
- id
-
-GDM1 port on EN7581 SoC is connected to the lan dsa switch.
-GDM{2,3,4} can be used as wan port connected to an external
-phy module. Configure hw mac address registers according to the port id.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240821-airoha-eth-wan-mac-addr-v2-1-8706d0cd6cd5@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 32 ++++++++++++++++------
- 1 file changed, 23 insertions(+), 9 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -67,9 +67,11 @@
- #define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
- #define FE_RST_CORE_MASK              BIT(0)
-+#define REG_FE_WAN_MAC_H              0x0030
- #define REG_FE_LAN_MAC_H              0x0040
--#define REG_FE_LAN_MAC_LMIN           0x0044
--#define REG_FE_LAN_MAC_LMAX           0x0048
-+
-+#define REG_FE_MAC_LMIN(_n)           ((_n) + 0x04)
-+#define REG_FE_MAC_LMAX(_n)           ((_n) + 0x08)
- #define REG_FE_CDM1_OQ_MAP0           0x0050
- #define REG_FE_CDM1_OQ_MAP1           0x0054
-@@ -900,16 +902,28 @@ static void airoha_qdma_irq_disable(stru
-       airoha_qdma_set_irqmask(qdma, index, mask, 0);
- }
--static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
-+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
- {
--      u32 val;
-+      /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
-+       * GDM{2,3,4} can be used as wan port connected to an external
-+       * phy module.
-+       */
-+      return port->id == 1;
-+}
-+
-+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      u32 val, reg;
-+      reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
-+                                         : REG_FE_WAN_MAC_H;
-       val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
--      airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
-+      airoha_fe_wr(eth, reg, val);
-       val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
--      airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
--      airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
-+      airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
-+      airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
- }
- static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
-@@ -2341,7 +2355,7 @@ static int airoha_dev_set_macaddr(struct
-       if (err)
-               return err;
--      airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
-+      airoha_set_macaddr(port, dev->dev_addr);
-       return 0;
- }
-@@ -2350,7 +2364,7 @@ static int airoha_dev_init(struct net_de
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
-+      airoha_set_macaddr(port, dev->dev_addr);
-       return 0;
- }
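The register arithmetic above is compact, so a worked example (values taken from the defines in this patch): for the LAN port (GDM1) the MAC lands in 0x0040/0x0044/0x0048, while a WAN port uses 0x0030/0x0034/0x0038.

    /* MAC 00:11:22:33:44:55, LAN port (port->id == 1):
     *   reg                  = REG_FE_LAN_MAC_H = 0x0040, value 0x001122
     *   REG_FE_MAC_LMIN(reg) = reg + 0x04       = 0x0044, value 0x334455
     *   REG_FE_MAC_LMAX(reg) = reg + 0x08       = 0x0048, value 0x334455
     * WAN ports start from REG_FE_WAN_MAC_H = 0x0030 instead, so the low
     * word goes to 0x0034/0x0038.
     */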
diff --git a/target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch b/target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch
deleted file mode 100644 (file)
index 63c6162..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-From 7d2bd8ac9d2494cf9b16c4b00df9424ad24ed18c Mon Sep 17 00:00:00 2001
-From: Liao Chen <liaochen4@huawei.com>
-Date: Mon, 26 Aug 2024 09:18:58 +0000
-Subject: [PATCH] net: airoha: fix module autoloading
-
-Add MODULE_DEVICE_TABLE(), so modules could be properly autoloaded
-based on the alias from of_device_id table.
-
-Signed-off-by: Liao Chen <liaochen4@huawei.com>
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240826091858.369910-4-liaochen4@huawei.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2776,6 +2776,7 @@ static const struct of_device_id of_airo
-       { .compatible = "airoha,en7581-eth" },
-       { /* sentinel */ }
- };
-+MODULE_DEVICE_TABLE(of, of_airoha_match);
- static struct platform_driver airoha_driver = {
-       .probe = airoha_probe,
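As background for the one-line fix above, a minimal sketch of the autoloading pattern MODULE_DEVICE_TABLE() completes (illustrative only; the driver, compatible string and names below are hypothetical):

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-eth" },   /* hypothetical */
            { /* sentinel */ }
    };
    /* Emits "of:..." aliases into modules.alias, so modprobe can resolve
     * the MODALIAS uevent of the matching DT node and autoload the module. */
    MODULE_DEVICE_TABLE(of, example_of_match);

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .driver = {
                    .name = "example-eth",
                    .of_match_table = example_of_match,
            },
    };
    module_platform_driver(example_driver);

    MODULE_LICENSE("GPL");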
diff --git a/target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch b/target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch
deleted file mode 100644 (file)
index fb86423..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 8e38e08f2c560328a873c35aff1a0dbea6a7d084 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 1 Oct 2024 12:10:25 +0200
-Subject: [PATCH 2/2] net: airoha: fix PSE memory configuration in
- airoha_fe_pse_ports_init()
-
-Align PSE memory configuration to vendor SDK. In particular, increase
-initial value of PSE reserved memory in airoha_fe_pse_ports_init()
-routine by the value used for the second Packet Processor Engine (PPE2)
-and do not overwrite the default value.
-
-Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
-for EN7581 SoC")
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20241001-airoha-eth-pse-fix-v2-2-9a56cdffd074@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1166,11 +1166,13 @@ static void airoha_fe_pse_ports_init(str
-               [FE_PSE_PORT_GDM4] = 2,
-               [FE_PSE_PORT_CDM5] = 2,
-       };
-+      u32 all_rsv;
-       int q;
-+      all_rsv = airoha_fe_get_pse_all_rsv(eth);
-       /* hw misses PPE2 oq rsv */
--      airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
--                    PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
-+      all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
-+      airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
-       /* CMD1 */
-       for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
diff --git a/target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch b/target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch
deleted file mode 100644 (file)
index a2e5c4f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-From 1f3e7ff4f296af1f4350f457d5bd82bc825e645a Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 1 Oct 2024 12:10:24 +0200
-Subject: [PATCH 1/2] net: airoha: read default PSE reserved pages value before
- updating
-
-Store the default value for the number of PSE reserved pages in orig_val
-at the beginning of airoha_fe_set_pse_oq_rsv routine, before updating it
-with airoha_fe_set_pse_queue_rsv_pages().
-Introduce airoha_fe_get_pse_all_rsv utility routine.
-
-Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
-for EN7581 SoC")
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20241001-airoha-eth-pse-fix-v2-1-9a56cdffd074@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 14 ++++++++++----
- 1 file changed, 10 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1116,17 +1116,23 @@ static void airoha_fe_set_pse_queue_rsv_
-                     PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
- }
-+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
-+{
-+      u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
-+
-+      return FIELD_GET(PSE_ALLRSV_MASK, val);
-+}
-+
- static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
-                                   u32 port, u32 queue, u32 val)
- {
--      u32 orig_val, tmp, all_rsv, fq_limit;
-+      u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
-+      u32 tmp, all_rsv, fq_limit;
-       airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
-       /* modify all rsv */
--      orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
--      tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
--      all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
-+      all_rsv = airoha_fe_get_pse_all_rsv(eth);
-       all_rsv += (val - orig_val);
-       airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
-                     FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
diff --git a/target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch b/target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch
deleted file mode 100644 (file)
index db6cc9c..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-From 3dc6e998d18bfba6e0dc979d3cc68eba98dfeef7 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 4 Oct 2024 15:51:26 +0200
-Subject: [PATCH] net: airoha: Update tx cpu dma ring idx at the end of xmit
- loop
-
-Move the tx cpu dma ring index update out of transmit loop of
-airoha_dev_xmit routine in order to not start transmitting the packet
-before it is fully DMA mapped (e.g. fragmented skbs).
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Reported-by: Felix Fietkau <nbd@nbd.name>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20241004-airoha-eth-7581-mapping-fix-v1-1-8e4279ab1812@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2480,10 +2480,6 @@ static netdev_tx_t airoha_dev_xmit(struc
-               e->dma_addr = addr;
-               e->dma_len = len;
--              airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
--                              TX_RING_CPU_IDX_MASK,
--                              FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
--
-               data = skb_frag_address(frag);
-               len = skb_frag_size(frag);
-       }
-@@ -2492,6 +2488,11 @@ static netdev_tx_t airoha_dev_xmit(struc
-       q->queued += i;
-       skb_tx_timestamp(skb);
-+      if (!netdev_xmit_more())
-+              airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
-+                              TX_RING_CPU_IDX_MASK,
-+                              FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-+
-       if (q->ndesc - q->queued < q->free_thr)
-               netif_tx_stop_queue(txq);
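For reference, the generic netdev_xmit_more() doorbell pattern the change above adopts, reduced to a sketch (illustrative only; example_kick_hw() is a placeholder for the TX cpu index MMIO write):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void example_kick_hw(struct net_device *dev)
    {
            /* placeholder: write the updated TX cpu ring index to hardware */
    }

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ... map skb->data and every fragment into the TX ring first ... */

            if (!netdev_xmit_more())
                    example_kick_hw(dev);   /* one doorbell per skb burst */

            return NETDEV_TX_OK;
    }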
diff --git a/target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch b/target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch
deleted file mode 100644 (file)
index d70cadf..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-From 2518b119639162251b6cc7195aec394930c1d867 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 9 Oct 2024 00:21:47 +0200
-Subject: [PATCH] net: airoha: Fix EGRESS_RATE_METER_EN_MASK definition
-
-Fix typo in EGRESS_RATE_METER_EN_MASK mask definition. This bug is not
-introducing any user visible problem since, even if we are setting
-EGRESS_RATE_METER_EN_MASK bit in REG_EGRESS_RATE_METER_CFG register,
-egress QoS metering is not supported yet since we are missing some other
-hw configurations (e.g. token bucket rate, token bucket size).
-
-Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
-for EN7581 SoC")
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20241009-airoha-fixes-v2-1-18af63ec19bf@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -554,7 +554,7 @@
- #define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
- #define REG_EGRESS_RATE_METER_CFG             0x100c
--#define EGRESS_RATE_METER_EN_MASK             BIT(29)
-+#define EGRESS_RATE_METER_EN_MASK             BIT(31)
- #define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
- #define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
- #define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
diff --git a/target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch b/target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch
deleted file mode 100644 (file)
index b6bb9f6..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-From 1d304174106c93ce05f6088813ad7203b3eb381a Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 12 Oct 2024 11:01:11 +0200
-Subject: [PATCH] net: airoha: Implement BQL support
-
-Introduce BQL support in the airoha_eth driver reporting to the kernel
-info about tx hw DMA queues in order to avoid bufferbloat and keep the
-latency small.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20241012-en7581-bql-v2-1-4deb4efdb60b@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1710,9 +1710,11 @@ static int airoha_qdma_tx_napi_poll(stru
-                       WRITE_ONCE(desc->msg1, 0);
-                       if (skb) {
-+                              u16 queue = skb_get_queue_mapping(skb);
-                               struct netdev_queue *txq;
--                              txq = netdev_get_tx_queue(skb->dev, qid);
-+                              txq = netdev_get_tx_queue(skb->dev, queue);
-+                              netdev_tx_completed_queue(txq, 1, skb->len);
-                               if (netif_tx_queue_stopped(txq) &&
-                                   q->ndesc - q->queued >= q->free_thr)
-                                       netif_tx_wake_queue(txq);
-@@ -2488,7 +2490,9 @@ static netdev_tx_t airoha_dev_xmit(struc
-       q->queued += i;
-       skb_tx_timestamp(skb);
--      if (!netdev_xmit_more())
-+      netdev_tx_sent_queue(txq, skb->len);
-+
-+      if (netif_xmit_stopped(txq) || !netdev_xmit_more())
-               airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
-                               TX_RING_CPU_IDX_MASK,
-                               FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
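The hunks above show two of the three BQL hooks; for completeness, the usual trio as a sketch (illustrative only; everything except the netdev_tx_*() helpers is a placeholder):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void example_xmit_path(struct netdev_queue *txq, struct sk_buff *skb)
    {
            /* transmit path: account bytes handed to the hardware */
            netdev_tx_sent_queue(txq, skb->len);
    }

    static void example_tx_completion(struct netdev_queue *txq,
                                      unsigned int pkts, unsigned int bytes)
    {
            /* completion path: report what the hardware actually sent */
            netdev_tx_completed_queue(txq, pkts, bytes);
    }

    static void example_ring_teardown(struct netdev_queue *txq)
    {
            /* queue reset/teardown: drop any outstanding BQL state */
            netdev_tx_reset_queue(txq);
    }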
diff --git a/target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch b/target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch
deleted file mode 100644 (file)
index 1e19356..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-From 457e74667f452d7f071ad2b2d9313ec62ebc4b02 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 6 Apr 2024 12:43:43 +0200
-Subject: [PATCH 1/2] clk: en7523: Add en_clk_soc_data data structure
-
-Introduce en_clk_soc_data data structure in order to define multiple
-clk_ops for each supported SoC. This is a preliminary patch to
-introduce EN7581 clock support.
-
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/562a0da8d7874a02a324687c152c87a1549924bd.1712399981.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 34 +++++++++++++++++++++-------------
- 1 file changed, 21 insertions(+), 13 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -3,8 +3,8 @@
- #include <linux/delay.h>
- #include <linux/clk-provider.h>
- #include <linux/io.h>
--#include <linux/of.h>
- #include <linux/platform_device.h>
-+#include <linux/property.h>
- #include <dt-bindings/clock/en7523-clk.h>
- #define REG_PCI_CONTROL                       0x88
-@@ -48,6 +48,10 @@ struct en_clk_gate {
-       struct clk_hw hw;
- };
-+struct en_clk_soc_data {
-+      const struct clk_ops pcie_ops;
-+};
-+
- static const u32 gsw_base[] = { 400000000, 500000000 };
- static const u32 emi_base[] = { 333000000, 400000000 };
- static const u32 bus_base[] = { 500000000, 540000000 };
-@@ -150,11 +154,6 @@ static const struct en_clk_desc en7523_b
-       }
- };
--static const struct of_device_id of_match_clk_en7523[] = {
--      { .compatible = "airoha,en7523-scu", },
--      { /* sentinel */ }
--};
--
- static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
- {
-       const struct en_clk_desc *desc = &en7523_base_clks[i];
-@@ -252,14 +251,10 @@ static void en7523_pci_unprepare(struct
- static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
-                                              void __iomem *np_base)
- {
--      static const struct clk_ops pcie_gate_ops = {
--              .is_enabled = en7523_pci_is_enabled,
--              .prepare = en7523_pci_prepare,
--              .unprepare = en7523_pci_unprepare,
--      };
-+      const struct en_clk_soc_data *soc_data = device_get_match_data(dev);
-       struct clk_init_data init = {
-               .name = "pcie",
--              .ops = &pcie_gate_ops,
-+              .ops = &soc_data->pcie_ops,
-       };
-       struct en_clk_gate *cg;
-@@ -269,7 +264,7 @@ static struct clk_hw *en7523_register_pc
-       cg->base = np_base;
-       cg->hw.init = &init;
--      en7523_pci_unprepare(&cg->hw);
-+      init.ops->unprepare(&cg->hw);
-       if (clk_hw_register(dev, &cg->hw))
-               return NULL;
-@@ -338,6 +333,19 @@ static int en7523_clk_probe(struct platf
-       return r;
- }
-+static const struct en_clk_soc_data en7523_data = {
-+      .pcie_ops = {
-+              .is_enabled = en7523_pci_is_enabled,
-+              .prepare = en7523_pci_prepare,
-+              .unprepare = en7523_pci_unprepare,
-+      },
-+};
-+
-+static const struct of_device_id of_match_clk_en7523[] = {
-+      { .compatible = "airoha,en7523-scu", .data = &en7523_data },
-+      { /* sentinel */ }
-+};
-+
- static struct platform_driver clk_en7523_drv = {
-       .probe = en7523_clk_probe,
-       .driver = {
diff --git a/target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch b/target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch
deleted file mode 100644 (file)
index c27b79c..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-From 66bc47326ce2a319add7e933d9340215711236ac Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 6 Apr 2024 12:43:44 +0200
-Subject: [PATCH 2/2] clk: en7523: Add EN7581 support
-
-Introduce EN7581 clock support to clk-en7523 driver.
-Add hw_init callback to en_clk_soc_data data structure.
-
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/57b6e53ed4d2b2e38abff6a3ea56841bad6be8a9.1712399981.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 157 +++++++++++++++++++++++++++++++++++++--
- 1 file changed, 152 insertions(+), 5 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -10,7 +10,9 @@
- #define REG_PCI_CONTROL                       0x88
- #define   REG_PCI_CONTROL_PERSTOUT    BIT(29)
- #define   REG_PCI_CONTROL_PERSTOUT1   BIT(26)
-+#define   REG_PCI_CONTROL_REFCLK_EN0  BIT(23)
- #define   REG_PCI_CONTROL_REFCLK_EN1  BIT(22)
-+#define   REG_PCI_CONTROL_PERSTOUT2   BIT(16)
- #define REG_GSW_CLK_DIV_SEL           0x1b4
- #define REG_EMI_CLK_DIV_SEL           0x1b8
- #define REG_BUS_CLK_DIV_SEL           0x1bc
-@@ -18,10 +20,25 @@
- #define REG_SPI_CLK_FREQ_SEL          0x1c8
- #define REG_NPU_CLK_DIV_SEL           0x1fc
- #define REG_CRYPTO_CLKSRC             0x200
--#define REG_RESET_CONTROL             0x834
-+#define REG_RESET_CONTROL2            0x830
-+#define   REG_RESET2_CONTROL_PCIE2    BIT(27)
-+#define REG_RESET_CONTROL1            0x834
- #define   REG_RESET_CONTROL_PCIEHB    BIT(29)
- #define   REG_RESET_CONTROL_PCIE1     BIT(27)
- #define   REG_RESET_CONTROL_PCIE2     BIT(26)
-+/* EN7581 */
-+#define REG_PCIE0_MEM                 0x00
-+#define REG_PCIE0_MEM_MASK            0x04
-+#define REG_PCIE1_MEM                 0x08
-+#define REG_PCIE1_MEM_MASK            0x0c
-+#define REG_PCIE2_MEM                 0x10
-+#define REG_PCIE2_MEM_MASK            0x14
-+#define REG_PCIE_RESET_OPEN_DRAIN     0x018c
-+#define REG_PCIE_RESET_OPEN_DRAIN_MASK        GENMASK(2, 0)
-+#define REG_NP_SCU_PCIC                       0x88
-+#define REG_NP_SCU_SSTR                       0x9c
-+#define REG_PCIE_XSI0_SEL_MASK                GENMASK(14, 13)
-+#define REG_PCIE_XSI1_SEL_MASK                GENMASK(12, 11)
- struct en_clk_desc {
-       int id;
-@@ -50,6 +67,8 @@ struct en_clk_gate {
- struct en_clk_soc_data {
-       const struct clk_ops pcie_ops;
-+      int (*hw_init)(struct platform_device *pdev, void __iomem *base,
-+                     void __iomem *np_base);
- };
- static const u32 gsw_base[] = { 400000000, 500000000 };
-@@ -216,14 +235,14 @@ static int en7523_pci_prepare(struct clk
-       usleep_range(1000, 2000);
-       /* Reset to default */
--      val = readl(np_base + REG_RESET_CONTROL);
-+      val = readl(np_base + REG_RESET_CONTROL1);
-       mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
-              REG_RESET_CONTROL_PCIEHB;
--      writel(val & ~mask, np_base + REG_RESET_CONTROL);
-+      writel(val & ~mask, np_base + REG_RESET_CONTROL1);
-       usleep_range(1000, 2000);
--      writel(val | mask, np_base + REG_RESET_CONTROL);
-+      writel(val | mask, np_base + REG_RESET_CONTROL1);
-       msleep(100);
--      writel(val & ~mask, np_base + REG_RESET_CONTROL);
-+      writel(val & ~mask, np_base + REG_RESET_CONTROL1);
-       usleep_range(5000, 10000);
-       /* Release device */
-@@ -264,6 +283,9 @@ static struct clk_hw *en7523_register_pc
-       cg->base = np_base;
-       cg->hw.init = &init;
-+
-+      if (init.ops->disable)
-+              init.ops->disable(&cg->hw);
-       init.ops->unprepare(&cg->hw);
-       if (clk_hw_register(dev, &cg->hw))
-@@ -272,6 +294,111 @@ static struct clk_hw *en7523_register_pc
-       return &cg->hw;
- }
-+static int en7581_pci_is_enabled(struct clk_hw *hw)
-+{
-+      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-+      u32 val, mask;
-+
-+      mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1;
-+      val = readl(cg->base + REG_PCI_CONTROL);
-+      return (val & mask) == mask;
-+}
-+
-+static int en7581_pci_prepare(struct clk_hw *hw)
-+{
-+      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-+      void __iomem *np_base = cg->base;
-+      u32 val, mask;
-+
-+      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
-+             REG_RESET_CONTROL_PCIEHB;
-+      val = readl(np_base + REG_RESET_CONTROL1);
-+      writel(val & ~mask, np_base + REG_RESET_CONTROL1);
-+      val = readl(np_base + REG_RESET_CONTROL2);
-+      writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
-+      usleep_range(5000, 10000);
-+
-+      return 0;
-+}
-+
-+static int en7581_pci_enable(struct clk_hw *hw)
-+{
-+      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-+      void __iomem *np_base = cg->base;
-+      u32 val, mask;
-+
-+      mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
-+             REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
-+             REG_PCI_CONTROL_PERSTOUT;
-+      val = readl(np_base + REG_PCI_CONTROL);
-+      writel(val | mask, np_base + REG_PCI_CONTROL);
-+      msleep(250);
-+
-+      return 0;
-+}
-+
-+static void en7581_pci_unprepare(struct clk_hw *hw)
-+{
-+      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-+      void __iomem *np_base = cg->base;
-+      u32 val, mask;
-+
-+      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
-+             REG_RESET_CONTROL_PCIEHB;
-+      val = readl(np_base + REG_RESET_CONTROL1);
-+      writel(val | mask, np_base + REG_RESET_CONTROL1);
-+      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
-+      writel(val | mask, np_base + REG_RESET_CONTROL1);
-+      val = readl(np_base + REG_RESET_CONTROL2);
-+      writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
-+      msleep(100);
-+}
-+
-+static void en7581_pci_disable(struct clk_hw *hw)
-+{
-+      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-+      void __iomem *np_base = cg->base;
-+      u32 val, mask;
-+
-+      mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
-+             REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
-+             REG_PCI_CONTROL_PERSTOUT;
-+      val = readl(np_base + REG_PCI_CONTROL);
-+      writel(val & ~mask, np_base + REG_PCI_CONTROL);
-+      usleep_range(1000, 2000);
-+}
-+
-+static int en7581_clk_hw_init(struct platform_device *pdev,
-+                            void __iomem *base,
-+                            void __iomem *np_base)
-+{
-+      void __iomem *pb_base;
-+      u32 val;
-+
-+      pb_base = devm_platform_ioremap_resource(pdev, 2);
-+      if (IS_ERR(pb_base))
-+              return PTR_ERR(pb_base);
-+
-+      val = readl(np_base + REG_NP_SCU_SSTR);
-+      val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
-+      writel(val, np_base + REG_NP_SCU_SSTR);
-+      val = readl(np_base + REG_NP_SCU_PCIC);
-+      writel(val | 3, np_base + REG_NP_SCU_PCIC);
-+
-+      writel(0x20000000, pb_base + REG_PCIE0_MEM);
-+      writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
-+      writel(0x24000000, pb_base + REG_PCIE1_MEM);
-+      writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
-+      writel(0x28000000, pb_base + REG_PCIE2_MEM);
-+      writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
-+
-+      val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
-+      writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
-+             base + REG_PCIE_RESET_OPEN_DRAIN);
-+
-+      return 0;
-+}
-+
- static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
-                                  void __iomem *base, void __iomem *np_base)
- {
-@@ -304,6 +431,7 @@ static void en7523_register_clocks(struc
- static int en7523_clk_probe(struct platform_device *pdev)
- {
-       struct device_node *node = pdev->dev.of_node;
-+      const struct en_clk_soc_data *soc_data;
-       struct clk_hw_onecell_data *clk_data;
-       void __iomem *base, *np_base;
-       int r;
-@@ -316,6 +444,13 @@ static int en7523_clk_probe(struct platf
-       if (IS_ERR(np_base))
-               return PTR_ERR(np_base);
-+      soc_data = device_get_match_data(&pdev->dev);
-+      if (soc_data->hw_init) {
-+              r = soc_data->hw_init(pdev, base, np_base);
-+              if (r)
-+                      return r;
-+      }
-+
-       clk_data = devm_kzalloc(&pdev->dev,
-                               struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
-                               GFP_KERNEL);
-@@ -341,8 +476,20 @@ static const struct en_clk_soc_data en75
-       },
- };
-+static const struct en_clk_soc_data en7581_data = {
-+      .pcie_ops = {
-+              .is_enabled = en7581_pci_is_enabled,
-+              .prepare = en7581_pci_prepare,
-+              .enable = en7581_pci_enable,
-+              .unprepare = en7581_pci_unprepare,
-+              .disable = en7581_pci_disable,
-+      },
-+      .hw_init = en7581_clk_hw_init,
-+};
-+
- static const struct of_device_id of_match_clk_en7523[] = {
-       { .compatible = "airoha,en7523-scu", .data = &en7523_data },
-+      { .compatible = "airoha,en7581-scu", .data = &en7581_data },
-       { /* sentinel */ }
- };
diff --git a/target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch b/target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch
deleted file mode 100644 (file)
index 4d9ff9e..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-From e0d8ea4ed5fa70fd085a54d0b574a044b9407c39 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 27 Jun 2024 13:04:23 +0200
-Subject: [PATCH 1/4] clk: en7523: Add reset-controller support for EN7581 SoC
-
-Introduce reset API support to EN7581 clock driver.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/4f735d17e549ea53769bf5a3f50406debb879a44.1719485847.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 192 ++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 187 insertions(+), 5 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -5,7 +5,11 @@
- #include <linux/io.h>
- #include <linux/platform_device.h>
- #include <linux/property.h>
-+#include <linux/reset-controller.h>
- #include <dt-bindings/clock/en7523-clk.h>
-+#include <dt-bindings/reset/airoha,en7581-reset.h>
-+
-+#define RST_NR_PER_BANK                       32
- #define REG_PCI_CONTROL                       0x88
- #define   REG_PCI_CONTROL_PERSTOUT    BIT(29)
-@@ -40,6 +44,9 @@
- #define REG_PCIE_XSI0_SEL_MASK                GENMASK(14, 13)
- #define REG_PCIE_XSI1_SEL_MASK                GENMASK(12, 11)
-+#define REG_RST_CTRL2                 0x00
-+#define REG_RST_CTRL1                 0x04
-+
- struct en_clk_desc {
-       int id;
-       const char *name;
-@@ -65,8 +72,20 @@ struct en_clk_gate {
-       struct clk_hw hw;
- };
-+struct en_rst_data {
-+      const u16 *bank_ofs;
-+      const u16 *idx_map;
-+      void __iomem *base;
-+      struct reset_controller_dev rcdev;
-+};
-+
- struct en_clk_soc_data {
-       const struct clk_ops pcie_ops;
-+      struct {
-+              const u16 *bank_ofs;
-+              const u16 *idx_map;
-+              u16 idx_map_nr;
-+      } reset;
-       int (*hw_init)(struct platform_device *pdev, void __iomem *base,
-                      void __iomem *np_base);
- };
-@@ -173,6 +192,69 @@ static const struct en_clk_desc en7523_b
-       }
- };
-+static const u16 en7581_rst_ofs[] = {
-+      REG_RST_CTRL2,
-+      REG_RST_CTRL1,
-+};
-+
-+static const u16 en7581_rst_map[] = {
-+      /* RST_CTRL2 */
-+      [EN7581_XPON_PHY_RST]           = 0,
-+      [EN7581_CPU_TIMER2_RST]         = 2,
-+      [EN7581_HSUART_RST]             = 3,
-+      [EN7581_UART4_RST]              = 4,
-+      [EN7581_UART5_RST]              = 5,
-+      [EN7581_I2C2_RST]               = 6,
-+      [EN7581_XSI_MAC_RST]            = 7,
-+      [EN7581_XSI_PHY_RST]            = 8,
-+      [EN7581_NPU_RST]                = 9,
-+      [EN7581_I2S_RST]                = 10,
-+      [EN7581_TRNG_RST]               = 11,
-+      [EN7581_TRNG_MSTART_RST]        = 12,
-+      [EN7581_DUAL_HSI0_RST]          = 13,
-+      [EN7581_DUAL_HSI1_RST]          = 14,
-+      [EN7581_HSI_RST]                = 15,
-+      [EN7581_DUAL_HSI0_MAC_RST]      = 16,
-+      [EN7581_DUAL_HSI1_MAC_RST]      = 17,
-+      [EN7581_HSI_MAC_RST]            = 18,
-+      [EN7581_WDMA_RST]               = 19,
-+      [EN7581_WOE0_RST]               = 20,
-+      [EN7581_WOE1_RST]               = 21,
-+      [EN7581_HSDMA_RST]              = 22,
-+      [EN7581_TDMA_RST]               = 24,
-+      [EN7581_EMMC_RST]               = 25,
-+      [EN7581_SOE_RST]                = 26,
-+      [EN7581_PCIE2_RST]              = 27,
-+      [EN7581_XFP_MAC_RST]            = 28,
-+      [EN7581_USB_HOST_P1_RST]        = 29,
-+      [EN7581_USB_HOST_P1_U3_PHY_RST] = 30,
-+      /* RST_CTRL1 */
-+      [EN7581_PCM1_ZSI_ISI_RST]       = RST_NR_PER_BANK + 0,
-+      [EN7581_FE_PDMA_RST]            = RST_NR_PER_BANK + 1,
-+      [EN7581_FE_QDMA_RST]            = RST_NR_PER_BANK + 2,
-+      [EN7581_PCM_SPIWP_RST]          = RST_NR_PER_BANK + 4,
-+      [EN7581_CRYPTO_RST]             = RST_NR_PER_BANK + 6,
-+      [EN7581_TIMER_RST]              = RST_NR_PER_BANK + 8,
-+      [EN7581_PCM1_RST]               = RST_NR_PER_BANK + 11,
-+      [EN7581_UART_RST]               = RST_NR_PER_BANK + 12,
-+      [EN7581_GPIO_RST]               = RST_NR_PER_BANK + 13,
-+      [EN7581_GDMA_RST]               = RST_NR_PER_BANK + 14,
-+      [EN7581_I2C_MASTER_RST]         = RST_NR_PER_BANK + 16,
-+      [EN7581_PCM2_ZSI_ISI_RST]       = RST_NR_PER_BANK + 17,
-+      [EN7581_SFC_RST]                = RST_NR_PER_BANK + 18,
-+      [EN7581_UART2_RST]              = RST_NR_PER_BANK + 19,
-+      [EN7581_GDMP_RST]               = RST_NR_PER_BANK + 20,
-+      [EN7581_FE_RST]                 = RST_NR_PER_BANK + 21,
-+      [EN7581_USB_HOST_P0_RST]        = RST_NR_PER_BANK + 22,
-+      [EN7581_GSW_RST]                = RST_NR_PER_BANK + 23,
-+      [EN7581_SFC2_PCM_RST]           = RST_NR_PER_BANK + 25,
-+      [EN7581_PCIE0_RST]              = RST_NR_PER_BANK + 26,
-+      [EN7581_PCIE1_RST]              = RST_NR_PER_BANK + 27,
-+      [EN7581_CPU_TIMER_RST]          = RST_NR_PER_BANK + 28,
-+      [EN7581_PCIE_HB_RST]            = RST_NR_PER_BANK + 29,
-+      [EN7581_XPON_MAC_RST]           = RST_NR_PER_BANK + 31,
-+};
-+
- static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
- {
-       const struct en_clk_desc *desc = &en7523_base_clks[i];
-@@ -375,7 +457,7 @@ static int en7581_clk_hw_init(struct pla
-       void __iomem *pb_base;
-       u32 val;
--      pb_base = devm_platform_ioremap_resource(pdev, 2);
-+      pb_base = devm_platform_ioremap_resource(pdev, 3);
-       if (IS_ERR(pb_base))
-               return PTR_ERR(pb_base);
-@@ -428,6 +510,95 @@ static void en7523_register_clocks(struc
-       clk_data->hws[EN7523_CLK_PCIE] = hw;
- }
-+static int en7523_reset_update(struct reset_controller_dev *rcdev,
-+                             unsigned long id, bool assert)
-+{
-+      struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
-+      void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
-+      u32 val;
-+
-+      val = readl(addr);
-+      if (assert)
-+              val |= BIT(id % RST_NR_PER_BANK);
-+      else
-+              val &= ~BIT(id % RST_NR_PER_BANK);
-+      writel(val, addr);
-+
-+      return 0;
-+}
-+
-+static int en7523_reset_assert(struct reset_controller_dev *rcdev,
-+                             unsigned long id)
-+{
-+      return en7523_reset_update(rcdev, id, true);
-+}
-+
-+static int en7523_reset_deassert(struct reset_controller_dev *rcdev,
-+                               unsigned long id)
-+{
-+      return en7523_reset_update(rcdev, id, false);
-+}
-+
-+static int en7523_reset_status(struct reset_controller_dev *rcdev,
-+                             unsigned long id)
-+{
-+      struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
-+      void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
-+
-+      return !!(readl(addr) & BIT(id % RST_NR_PER_BANK));
-+}
-+
-+static int en7523_reset_xlate(struct reset_controller_dev *rcdev,
-+                            const struct of_phandle_args *reset_spec)
-+{
-+      struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
-+
-+      if (reset_spec->args[0] >= rcdev->nr_resets)
-+              return -EINVAL;
-+
-+      return rst_data->idx_map[reset_spec->args[0]];
-+}
-+
-+static const struct reset_control_ops en7523_reset_ops = {
-+      .assert = en7523_reset_assert,
-+      .deassert = en7523_reset_deassert,
-+      .status = en7523_reset_status,
-+};
-+
-+static int en7523_reset_register(struct platform_device *pdev,
-+                               const struct en_clk_soc_data *soc_data)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct en_rst_data *rst_data;
-+      void __iomem *base;
-+
-+      /* no reset lines available */
-+      if (!soc_data->reset.idx_map_nr)
-+              return 0;
-+
-+      base = devm_platform_ioremap_resource(pdev, 2);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
-+      if (!rst_data)
-+              return -ENOMEM;
-+
-+      rst_data->bank_ofs = soc_data->reset.bank_ofs;
-+      rst_data->idx_map = soc_data->reset.idx_map;
-+      rst_data->base = base;
-+
-+      rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr;
-+      rst_data->rcdev.of_xlate = en7523_reset_xlate;
-+      rst_data->rcdev.ops = &en7523_reset_ops;
-+      rst_data->rcdev.of_node = dev->of_node;
-+      rst_data->rcdev.of_reset_n_cells = 1;
-+      rst_data->rcdev.owner = THIS_MODULE;
-+      rst_data->rcdev.dev = dev;
-+
-+      return devm_reset_controller_register(dev, &rst_data->rcdev);
-+}
-+
- static int en7523_clk_probe(struct platform_device *pdev)
- {
-       struct device_node *node = pdev->dev.of_node;
-@@ -461,11 +632,17 @@ static int en7523_clk_probe(struct platf
-       r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-       if (r)
--              dev_err(&pdev->dev,
--                      "could not register clock provider: %s: %d\n",
--                      pdev->name, r);
-+              return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n",
-+                                   pdev->name);
-+
-+      r = en7523_reset_register(pdev, soc_data);
-+      if (r) {
-+              of_clk_del_provider(node);
-+              return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n",
-+                                   pdev->name);
-+      }
--      return r;
-+      return 0;
- }
- static const struct en_clk_soc_data en7523_data = {
-@@ -484,6 +661,11 @@ static const struct en_clk_soc_data en75
-               .unprepare = en7581_pci_unprepare,
-               .disable = en7581_pci_disable,
-       },
-+      .reset = {
-+              .bank_ofs = en7581_rst_ofs,
-+              .idx_map = en7581_rst_map,
-+              .idx_map_nr = ARRAY_SIZE(en7581_rst_map),
-+      },
-       .hw_init = en7581_clk_hw_init,
- };
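To make the double indirection above concrete, a worked example using the values from this patch (and the EN7581_PCIE0_RST define from the dt-bindings header further below):

    /* consumer reset cell: EN7581_PCIE0_RST           == 48
     * of_xlate:  en7581_rst_map[48]                   == RST_NR_PER_BANK + 26 == 58
     * bank:      en7581_rst_ofs[58 / RST_NR_PER_BANK] == REG_RST_CTRL1 (0x04)
     * bit:       BIT(58 % RST_NR_PER_BANK)            == BIT(26)
     * i.e. asserting this line sets bit 26 of the RST_CTRL1 register.
     */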
diff --git a/target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch b/target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch
deleted file mode 100644 (file)
index 2a32ad0..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-From db7a4a11e8be375b0a9c159f688e0cea49eacc5d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 27 Jun 2024 13:04:24 +0200
-Subject: [PATCH 2/4] clk: en7523: Remove pcie prepare/unpreare callbacks for
- EN7581 SoC
-
-Get rid of prepare and unprepare callbacks for the PCIe clock since they can
-be modeled as a reset line consumed by the PCIe driver
-(pcie-mediatek-gen3).
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/16df149975514d3030499c48fc1c64f090093595.1719485847.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 41 ++--------------------------------------
- 1 file changed, 2 insertions(+), 39 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -366,9 +366,8 @@ static struct clk_hw *en7523_register_pc
-       cg->base = np_base;
-       cg->hw.init = &init;
--      if (init.ops->disable)
--              init.ops->disable(&cg->hw);
--      init.ops->unprepare(&cg->hw);
-+      if (init.ops->unprepare)
-+              init.ops->unprepare(&cg->hw);
-       if (clk_hw_register(dev, &cg->hw))
-               return NULL;
-@@ -386,23 +385,6 @@ static int en7581_pci_is_enabled(struct
-       return (val & mask) == mask;
- }
--static int en7581_pci_prepare(struct clk_hw *hw)
--{
--      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
--      void __iomem *np_base = cg->base;
--      u32 val, mask;
--
--      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
--             REG_RESET_CONTROL_PCIEHB;
--      val = readl(np_base + REG_RESET_CONTROL1);
--      writel(val & ~mask, np_base + REG_RESET_CONTROL1);
--      val = readl(np_base + REG_RESET_CONTROL2);
--      writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
--      usleep_range(5000, 10000);
--
--      return 0;
--}
--
- static int en7581_pci_enable(struct clk_hw *hw)
- {
-       struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-@@ -419,23 +401,6 @@ static int en7581_pci_enable(struct clk_
-       return 0;
- }
--static void en7581_pci_unprepare(struct clk_hw *hw)
--{
--      struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
--      void __iomem *np_base = cg->base;
--      u32 val, mask;
--
--      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
--             REG_RESET_CONTROL_PCIEHB;
--      val = readl(np_base + REG_RESET_CONTROL1);
--      writel(val | mask, np_base + REG_RESET_CONTROL1);
--      mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
--      writel(val | mask, np_base + REG_RESET_CONTROL1);
--      val = readl(np_base + REG_RESET_CONTROL2);
--      writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
--      msleep(100);
--}
--
- static void en7581_pci_disable(struct clk_hw *hw)
- {
-       struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
-@@ -656,9 +621,7 @@ static const struct en_clk_soc_data en75
- static const struct en_clk_soc_data en7581_data = {
-       .pcie_ops = {
-               .is_enabled = en7581_pci_is_enabled,
--              .prepare = en7581_pci_prepare,
-               .enable = en7581_pci_enable,
--              .unprepare = en7581_pci_unprepare,
-               .disable = en7581_pci_disable,
-       },
-       .reset = {
diff --git a/target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch b/target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch
deleted file mode 100644 (file)
index 8a4b9c7..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-From bf288bd25d6232310abb81db417376ce460eb032 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 27 Jun 2024 13:04:25 +0200
-Subject: [PATCH 3/4] clk: en7523: Remove PCIe reset open drain configuration
- for EN7581
-
-PCIe reset open drain configuration will be managed by pinctrl driver.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/43276af5f08a554b4ab2e52e8d437fff5c06a732.1719485847.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 12 ++----------
- 1 file changed, 2 insertions(+), 10 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -37,8 +37,6 @@
- #define REG_PCIE1_MEM_MASK            0x0c
- #define REG_PCIE2_MEM                 0x10
- #define REG_PCIE2_MEM_MASK            0x14
--#define REG_PCIE_RESET_OPEN_DRAIN     0x018c
--#define REG_PCIE_RESET_OPEN_DRAIN_MASK        GENMASK(2, 0)
- #define REG_NP_SCU_PCIC                       0x88
- #define REG_NP_SCU_SSTR                       0x9c
- #define REG_PCIE_XSI0_SEL_MASK                GENMASK(14, 13)
-@@ -86,8 +84,7 @@ struct en_clk_soc_data {
-               const u16 *idx_map;
-               u16 idx_map_nr;
-       } reset;
--      int (*hw_init)(struct platform_device *pdev, void __iomem *base,
--                     void __iomem *np_base);
-+      int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
- };
- static const u32 gsw_base[] = { 400000000, 500000000 };
-@@ -416,7 +413,6 @@ static void en7581_pci_disable(struct cl
- }
- static int en7581_clk_hw_init(struct platform_device *pdev,
--                            void __iomem *base,
-                             void __iomem *np_base)
- {
-       void __iomem *pb_base;
-@@ -439,10 +435,6 @@ static int en7581_clk_hw_init(struct pla
-       writel(0x28000000, pb_base + REG_PCIE2_MEM);
-       writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
--      val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
--      writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
--             base + REG_PCIE_RESET_OPEN_DRAIN);
--
-       return 0;
- }
-@@ -582,7 +574,7 @@ static int en7523_clk_probe(struct platf
-       soc_data = device_get_match_data(&pdev->dev);
-       if (soc_data->hw_init) {
--              r = soc_data->hw_init(pdev, base, np_base);
-+              r = soc_data->hw_init(pdev, np_base);
-               if (r)
-                       return r;
-       }
diff --git a/target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch b/target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch
deleted file mode 100644 (file)
index 49ab4e9..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-From 7aa291962f4c3b7afb9a12fa60b406b95e5eacb4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 27 Jun 2024 13:04:22 +0200
-Subject: [PATCH] dt-bindings: clock: airoha: Add reset support to EN7581 clock
- binding
-
-Introduce reset capability to EN7581 device-tree clock binding
-documentation. Add reset register mapping between misc scu and pb scu
-ones in order to follow the memory order. This change is not
-introducing any backward compatibility issue since the EN7581 dts is not
-upstream yet.
-
-Fixes: 0a382be005cf ("dt-bindings: clock: airoha: add EN7581 binding")
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Rob Herring (Arm) <robh@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/28fef3e83062d5d71e7b4be4b47583f851a15bf8.1719485847.git.lorenzo@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- .../bindings/clock/airoha,en7523-scu.yaml     | 25 ++++++-
- .../dt-bindings/reset/airoha,en7581-reset.h   | 66 +++++++++++++++++++
- 2 files changed, 90 insertions(+), 1 deletion(-)
- create mode 100644 include/dt-bindings/reset/airoha,en7581-reset.h
-
---- /dev/null
-+++ b/include/dt-bindings/reset/airoha,en7581-reset.h
-@@ -0,0 +1,66 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
-+#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
-+
-+/* RST_CTRL2 */
-+#define EN7581_XPON_PHY_RST            0
-+#define EN7581_CPU_TIMER2_RST          1
-+#define EN7581_HSUART_RST              2
-+#define EN7581_UART4_RST               3
-+#define EN7581_UART5_RST               4
-+#define EN7581_I2C2_RST                        5
-+#define EN7581_XSI_MAC_RST             6
-+#define EN7581_XSI_PHY_RST             7
-+#define EN7581_NPU_RST                         8
-+#define EN7581_I2S_RST                         9
-+#define EN7581_TRNG_RST                       10
-+#define EN7581_TRNG_MSTART_RST                11
-+#define EN7581_DUAL_HSI0_RST          12
-+#define EN7581_DUAL_HSI1_RST          13
-+#define EN7581_HSI_RST                        14
-+#define EN7581_DUAL_HSI0_MAC_RST      15
-+#define EN7581_DUAL_HSI1_MAC_RST      16
-+#define EN7581_HSI_MAC_RST            17
-+#define EN7581_WDMA_RST                       18
-+#define EN7581_WOE0_RST                       19
-+#define EN7581_WOE1_RST                       20
-+#define EN7581_HSDMA_RST              21
-+#define EN7581_TDMA_RST                       22
-+#define EN7581_EMMC_RST                       23
-+#define EN7581_SOE_RST                        24
-+#define EN7581_PCIE2_RST              25
-+#define EN7581_XFP_MAC_RST            26
-+#define EN7581_USB_HOST_P1_RST                27
-+#define EN7581_USB_HOST_P1_U3_PHY_RST 28
-+/* RST_CTRL1 */
-+#define EN7581_PCM1_ZSI_ISI_RST               29
-+#define EN7581_FE_PDMA_RST            30
-+#define EN7581_FE_QDMA_RST            31
-+#define EN7581_PCM_SPIWP_RST          32
-+#define EN7581_CRYPTO_RST             33
-+#define EN7581_TIMER_RST              34
-+#define EN7581_PCM1_RST                       35
-+#define EN7581_UART_RST                       36
-+#define EN7581_GPIO_RST                       37
-+#define EN7581_GDMA_RST                       38
-+#define EN7581_I2C_MASTER_RST         39
-+#define EN7581_PCM2_ZSI_ISI_RST               40
-+#define EN7581_SFC_RST                        41
-+#define EN7581_UART2_RST              42
-+#define EN7581_GDMP_RST                       43
-+#define EN7581_FE_RST                 44
-+#define EN7581_USB_HOST_P0_RST                45
-+#define EN7581_GSW_RST                        46
-+#define EN7581_SFC2_PCM_RST           47
-+#define EN7581_PCIE0_RST              48
-+#define EN7581_PCIE1_RST              49
-+#define EN7581_CPU_TIMER_RST          50
-+#define EN7581_PCIE_HB_RST            51
-+#define EN7581_XPON_MAC_RST           52
-+
-+#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_ */
diff --git a/target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch b/target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch
deleted file mode 100644 (file)
index f09e69d..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-From dc869a40d73ee6e9f47d683690ae507e30e56044 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 3 Jul 2024 18:12:42 +0200
-Subject: [PATCH 1/3] PCI: mediatek-gen3: Add mtk_gen3_pcie_pdata data
- structure
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Introduce mtk_gen3_pcie_pdata data structure in order to define
-multiple callbacks for each supported SoC.
-
-This is a preliminary patch to introduce EN7581 PCIe support.
-
-Link: https://lore.kernel.org/linux-pci/c193d1a87505d045e2e0ef33317bce17012ee095.1720022580.git.lorenzo@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Acked-by: Jianjun Wang <jianjun.wang@mediatek.com>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 24 ++++++++++++++++++---
- 1 file changed, 21 insertions(+), 3 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -100,6 +100,16 @@
- #define PCIE_ATR_TLP_TYPE_MEM         PCIE_ATR_TLP_TYPE(0)
- #define PCIE_ATR_TLP_TYPE_IO          PCIE_ATR_TLP_TYPE(2)
-+struct mtk_gen3_pcie;
-+
-+/**
-+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
-+ * @power_up: pcie power_up callback
-+ */
-+struct mtk_gen3_pcie_pdata {
-+      int (*power_up)(struct mtk_gen3_pcie *pcie);
-+};
-+
- /**
-  * struct mtk_msi_set - MSI information for each set
-  * @base: IO mapped register base
-@@ -131,6 +141,7 @@ struct mtk_msi_set {
-  * @msi_sets: MSI sets information
-  * @lock: lock protecting IRQ bit map
-  * @msi_irq_in_use: bit map for assigned MSI IRQ
-+ * @soc: pointer to SoC-dependent operations
-  */
- struct mtk_gen3_pcie {
-       struct device *dev;
-@@ -151,6 +162,8 @@ struct mtk_gen3_pcie {
-       struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
-       struct mutex lock;
-       DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
-+
-+      const struct mtk_gen3_pcie_pdata *soc;
- };
- /* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
-@@ -904,7 +917,7 @@ static int mtk_pcie_setup(struct mtk_gen
-       usleep_range(10, 20);
-       /* Don't touch the hardware registers before power up */
--      err = mtk_pcie_power_up(pcie);
-+      err = pcie->soc->power_up(pcie);
-       if (err)
-               return err;
-@@ -939,6 +952,7 @@ static int mtk_pcie_probe(struct platfor
-       pcie = pci_host_bridge_priv(host);
-       pcie->dev = dev;
-+      pcie->soc = device_get_match_data(dev);
-       platform_set_drvdata(pdev, pcie);
-       err = mtk_pcie_setup(pcie);
-@@ -1054,7 +1068,7 @@ static int mtk_pcie_resume_noirq(struct
-       struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
-       int err;
--      err = mtk_pcie_power_up(pcie);
-+      err = pcie->soc->power_up(pcie);
-       if (err)
-               return err;
-@@ -1074,8 +1088,12 @@ static const struct dev_pm_ops mtk_pcie_
-                                 mtk_pcie_resume_noirq)
- };
-+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
-+      .power_up = mtk_pcie_power_up,
-+};
-+
- static const struct of_device_id mtk_pcie_of_match[] = {
--      { .compatible = "mediatek,mt8192-pcie" },
-+      { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
-       {},
- };
- MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
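
Note: the 6.6 backport dropped above introduced the per-SoC match-data pattern: a const ops structure is attached to each compatible string through the OF match table's .data field, fetched once in probe with device_get_match_data(), and the common code calls pcie->soc->power_up() instead of a hard-coded helper. Below is a minimal, self-contained sketch of that pattern only; all names (my_pcie, my_soc_data, my_probe) are illustrative and not taken from the driver.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>

struct my_pcie;

/* per-SoC operations selected from the OF match table */
struct my_soc_data {
	int (*power_up)(struct my_pcie *pcie);
};

struct my_pcie {
	struct device *dev;
	const struct my_soc_data *soc;	/* set once at probe time */
};

static int mt8192_power_up(struct my_pcie *pcie)
{
	/* legacy bring-up kept as the default callback */
	return 0;
}

static const struct my_soc_data my_soc_mt8192 = {
	.power_up = mt8192_power_up,
};

static const struct of_device_id my_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie", .data = &my_soc_mt8192 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_of_match);

static int my_probe(struct platform_device *pdev)
{
	struct my_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;
	/* pick the callbacks attached to the matched compatible string */
	pcie->soc = device_get_match_data(&pdev->dev);

	return pcie->soc->power_up(pcie);
}

static struct platform_driver my_pcie_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-pcie",
		.of_match_table = my_of_match,
	},
};
module_platform_driver(my_pcie_driver);

MODULE_LICENSE("GPL");
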
diff --git a/target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch b/target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch
deleted file mode 100644 (file)
index 5fbbc83..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-From ee9eabbe3f0f0c7458d89840add97e54d4e0bccf Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 3 Jul 2024 18:12:43 +0200
-Subject: [PATCH 2/3] PCI: mediatek-gen3: Rely on reset_bulk APIs for PHY reset
- lines
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Use reset_bulk APIs to manage PHY reset lines.
-
-This is a preliminary patch in order to add Airoha EN7581 PCIe support.
-
-Link: https://lore.kernel.org/linux-pci/3ceb83bc0defbcf868521f8df4b9100e55ec2614.1720022580.git.lorenzo@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Acked-by: Jianjun Wang <jianjun.wang@mediatek.com>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 45 +++++++++++++++------
- 1 file changed, 33 insertions(+), 12 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -100,14 +100,21 @@
- #define PCIE_ATR_TLP_TYPE_MEM         PCIE_ATR_TLP_TYPE(0)
- #define PCIE_ATR_TLP_TYPE_IO          PCIE_ATR_TLP_TYPE(2)
-+#define MAX_NUM_PHY_RESETS            1
-+
- struct mtk_gen3_pcie;
- /**
-  * struct mtk_gen3_pcie_pdata - differentiate between host generations
-  * @power_up: pcie power_up callback
-+ * @phy_resets: phy reset lines SoC data.
-  */
- struct mtk_gen3_pcie_pdata {
-       int (*power_up)(struct mtk_gen3_pcie *pcie);
-+      struct {
-+              const char *id[MAX_NUM_PHY_RESETS];
-+              int num_resets;
-+      } phy_resets;
- };
- /**
-@@ -128,7 +135,7 @@ struct mtk_msi_set {
-  * @base: IO mapped register base
-  * @reg_base: physical register base
-  * @mac_reset: MAC reset control
-- * @phy_reset: PHY reset control
-+ * @phy_resets: PHY reset controllers
-  * @phy: PHY controller block
-  * @clks: PCIe clocks
-  * @num_clks: PCIe clocks count for this port
-@@ -148,7 +155,7 @@ struct mtk_gen3_pcie {
-       void __iomem *base;
-       phys_addr_t reg_base;
-       struct reset_control *mac_reset;
--      struct reset_control *phy_reset;
-+      struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
-       struct phy *phy;
-       struct clk_bulk_data *clks;
-       int num_clks;
-@@ -788,10 +795,10 @@ static int mtk_pcie_setup_irq(struct mtk
- static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
- {
-+      int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
-       struct device *dev = pcie->dev;
-       struct platform_device *pdev = to_platform_device(dev);
-       struct resource *regs;
--      int ret;
-       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
-       if (!regs)
-@@ -804,12 +811,12 @@ static int mtk_pcie_parse_port(struct mt
-       pcie->reg_base = regs->start;
--      pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
--      if (IS_ERR(pcie->phy_reset)) {
--              ret = PTR_ERR(pcie->phy_reset);
--              if (ret != -EPROBE_DEFER)
--                      dev_err(dev, "failed to get PHY reset\n");
-+      for (i = 0; i < num_resets; i++)
-+              pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
-+      ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
-+      if (ret) {
-+              dev_err(dev, "failed to get PHY bulk reset\n");
-               return ret;
-       }
-@@ -846,7 +853,11 @@ static int mtk_pcie_power_up(struct mtk_
-       int err;
-       /* PHY power on and enable pipe clock */
--      reset_control_deassert(pcie->phy_reset);
-+      err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-+      if (err) {
-+              dev_err(dev, "failed to deassert PHYs\n");
-+              return err;
-+      }
-       err = phy_init(pcie->phy);
-       if (err) {
-@@ -882,7 +893,7 @@ err_clk_init:
- err_phy_on:
-       phy_exit(pcie->phy);
- err_phy_init:
--      reset_control_assert(pcie->phy_reset);
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-       return err;
- }
-@@ -897,7 +908,7 @@ static void mtk_pcie_power_down(struct m
-       phy_power_off(pcie->phy);
-       phy_exit(pcie->phy);
--      reset_control_assert(pcie->phy_reset);
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
- }
- static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
-@@ -909,10 +920,16 @@ static int mtk_pcie_setup(struct mtk_gen
-               return err;
-       /*
-+       * Deassert the line in order to avoid unbalance in deassert_count
-+       * counter since the bulk is shared.
-+       */
-+      reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-+      /*
-        * The controller may have been left out of reset by the bootloader
-        * so make sure that we get a clean start by asserting resets here.
-        */
--      reset_control_assert(pcie->phy_reset);
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-+
-       reset_control_assert(pcie->mac_reset);
-       usleep_range(10, 20);
-@@ -1090,6 +1107,10 @@ static const struct dev_pm_ops mtk_pcie_
- static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
-       .power_up = mtk_pcie_power_up,
-+      .phy_resets = {
-+              .id[0] = "phy",
-+              .num_resets = 1,
-+      },
- };
- static const struct of_device_id mtk_pcie_of_match[] = {
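
Note: the patch dropped above replaces the single optional "phy" reset line with the reset_control_bulk API, so a later SoC can declare several PHY reset lines behind the same code path: the id strings and their count live in the per-SoC data, and assert/deassert act on the whole array. A short sketch of that usage follows, assuming a helper named request_phy_resets() (illustrative, not from the driver); the dropped patch additionally balances the shared deassert count in mtk_pcie_setup() before asserting, which is omitted here.

#include <linux/device.h>
#include <linux/reset.h>

static int request_phy_resets(struct device *dev,
			      struct reset_control_bulk_data *resets,
			      const char *const *ids, int num)
{
	int i, ret;

	/* ids[] and num come from per-SoC match data */
	for (i = 0; i < num; i++)
		resets[i].id = ids[i];

	/* shared resets: other consumers (e.g. the PHY driver) may hold them */
	ret = devm_reset_control_bulk_get_optional_shared(dev, num, resets);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get PHY bulk reset\n");

	/* put all lines into a known asserted state before power-up */
	ret = reset_control_bulk_assert(num, resets);
	if (ret)
		return ret;

	return reset_control_bulk_deassert(num, resets);
}
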
diff --git a/target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch b/target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch
deleted file mode 100644 (file)
index 19b003d..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-From f6ab898356dd70f267c49045a79d28ea5cf5e43e Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 3 Jul 2024 18:12:44 +0200
-Subject: [PATCH 3/3] PCI: mediatek-gen3: Add Airoha EN7581 support
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Introduce support for Airoha EN7581 PCIe controller to mediatek-gen3
-PCIe controller driver.
-
-Link: https://lore.kernel.org/linux-pci/aca00bd672ee576ad96d279414fc0835ff31f637.1720022580.git.lorenzo@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Acked-by: Jianjun Wang <jianjun.wang@mediatek.com>
----
- drivers/pci/controller/Kconfig              |   2 +-
- drivers/pci/controller/pcie-mediatek-gen3.c | 113 +++++++++++++++++++-
- 2 files changed, 113 insertions(+), 2 deletions(-)
-
---- a/drivers/pci/controller/Kconfig
-+++ b/drivers/pci/controller/Kconfig
-@@ -196,7 +196,7 @@ config PCIE_MEDIATEK
- config PCIE_MEDIATEK_GEN3
-       tristate "MediaTek Gen3 PCIe controller"
--      depends on ARCH_MEDIATEK || COMPILE_TEST
-+      depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
-       depends on PCI_MSI
-       help
-         Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -6,7 +6,9 @@
-  * Author: Jianjun Wang <jianjun.wang@mediatek.com>
-  */
-+#include <linux/bitfield.h>
- #include <linux/clk.h>
-+#include <linux/clk-provider.h>
- #include <linux/delay.h>
- #include <linux/iopoll.h>
- #include <linux/irq.h>
-@@ -15,6 +17,8 @@
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/msi.h>
-+#include <linux/of_device.h>
-+#include <linux/of_pci.h>
- #include <linux/pci.h>
- #include <linux/phy/phy.h>
- #include <linux/platform_device.h>
-@@ -29,6 +33,12 @@
- #define PCI_CLASS(class)              (class << 8)
- #define PCIE_RC_MODE                  BIT(0)
-+#define PCIE_EQ_PRESET_01_REG         0x100
-+#define PCIE_VAL_LN0_DOWNSTREAM               GENMASK(6, 0)
-+#define PCIE_VAL_LN0_UPSTREAM         GENMASK(14, 8)
-+#define PCIE_VAL_LN1_DOWNSTREAM               GENMASK(22, 16)
-+#define PCIE_VAL_LN1_UPSTREAM         GENMASK(30, 24)
-+
- #define PCIE_CFGNUM_REG                       0x140
- #define PCIE_CFG_DEVFN(devfn)         ((devfn) & GENMASK(7, 0))
- #define PCIE_CFG_BUS(bus)             (((bus) << 8) & GENMASK(15, 8))
-@@ -68,6 +78,14 @@
- #define PCIE_MSI_SET_ENABLE_REG               0x190
- #define PCIE_MSI_SET_ENABLE           GENMASK(PCIE_MSI_SET_NUM - 1, 0)
-+#define PCIE_PIPE4_PIE8_REG           0x338
-+#define PCIE_K_FINETUNE_MAX           GENMASK(5, 0)
-+#define PCIE_K_FINETUNE_ERR           GENMASK(7, 6)
-+#define PCIE_K_PRESET_TO_USE          GENMASK(18, 8)
-+#define PCIE_K_PHYPARAM_QUERY         BIT(19)
-+#define PCIE_K_QUERY_TIMEOUT          BIT(20)
-+#define PCIE_K_PRESET_TO_USE_16G      GENMASK(31, 21)
-+
- #define PCIE_MSI_SET_BASE_REG         0xc00
- #define PCIE_MSI_SET_OFFSET           0x10
- #define PCIE_MSI_SET_STATUS_OFFSET    0x04
-@@ -100,7 +118,10 @@
- #define PCIE_ATR_TLP_TYPE_MEM         PCIE_ATR_TLP_TYPE(0)
- #define PCIE_ATR_TLP_TYPE_IO          PCIE_ATR_TLP_TYPE(2)
--#define MAX_NUM_PHY_RESETS            1
-+#define MAX_NUM_PHY_RESETS            3
-+
-+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
-+#define PCIE_EN7581_RESET_TIME_MS     100
- struct mtk_gen3_pcie;
-@@ -847,6 +868,85 @@ static int mtk_pcie_parse_port(struct mt
-       return 0;
- }
-+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
-+{
-+      struct device *dev = pcie->dev;
-+      int err;
-+      u32 val;
-+
-+      /*
-+       * Wait for the time needed to complete the bulk assert in
-+       * mtk_pcie_setup for EN7581 SoC.
-+       */
-+      mdelay(PCIE_EN7581_RESET_TIME_MS);
-+
-+      err = phy_init(pcie->phy);
-+      if (err) {
-+              dev_err(dev, "failed to initialize PHY\n");
-+              return err;
-+      }
-+
-+      err = phy_power_on(pcie->phy);
-+      if (err) {
-+              dev_err(dev, "failed to power on PHY\n");
-+              goto err_phy_on;
-+      }
-+
-+      err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-+      if (err) {
-+              dev_err(dev, "failed to deassert PHYs\n");
-+              goto err_phy_deassert;
-+      }
-+
-+      /*
-+       * Wait for the time needed to complete the bulk de-assert above.
-+       * This time is specific for EN7581 SoC.
-+       */
-+      mdelay(PCIE_EN7581_RESET_TIME_MS);
-+
-+      pm_runtime_enable(dev);
-+      pm_runtime_get_sync(dev);
-+
-+      err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
-+      if (err) {
-+              dev_err(dev, "failed to prepare clock\n");
-+              goto err_clk_prepare;
-+      }
-+
-+      val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
-+            FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
-+            FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
-+            FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
-+      writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
-+
-+      val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
-+            FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
-+            FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
-+            FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
-+      writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
-+
-+      err = clk_bulk_enable(pcie->num_clks, pcie->clks);
-+      if (err) {
-+              dev_err(dev, "failed to prepare clock\n");
-+              goto err_clk_enable;
-+      }
-+
-+      return 0;
-+
-+err_clk_enable:
-+      clk_bulk_unprepare(pcie->num_clks, pcie->clks);
-+err_clk_prepare:
-+      pm_runtime_put_sync(dev);
-+      pm_runtime_disable(dev);
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-+err_phy_deassert:
-+      phy_power_off(pcie->phy);
-+err_phy_on:
-+      phy_exit(pcie->phy);
-+
-+      return err;
-+}
-+
- static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
- {
-       struct device *dev = pcie->dev;
-@@ -1113,7 +1213,18 @@ static const struct mtk_gen3_pcie_pdata
-       },
- };
-+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
-+      .power_up = mtk_pcie_en7581_power_up,
-+      .phy_resets = {
-+              .id[0] = "phy-lane0",
-+              .id[1] = "phy-lane1",
-+              .id[2] = "phy-lane2",
-+              .num_resets = 3,
-+      },
-+};
-+
- static const struct of_device_id mtk_pcie_of_match[] = {
-+      { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
-       { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
-       {},
- };
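
Note: the EN7581 power-up callback added by the patch dropped above differs from the MT8192 path mainly in its fixed reset settle delays and in two tuning registers it programs before enabling the bulk clocks, using the usual GENMASK/FIELD_PREP idiom. The sketch below repeats the equalization-preset write with the masks and values from the patch; en7581_program_eq_presets() itself is an illustrative helper name, not part of the driver.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define PCIE_EQ_PRESET_01_REG		0x100
#define PCIE_VAL_LN0_DOWNSTREAM		GENMASK(6, 0)
#define PCIE_VAL_LN0_UPSTREAM		GENMASK(14, 8)
#define PCIE_VAL_LN1_DOWNSTREAM		GENMASK(22, 16)
#define PCIE_VAL_LN1_UPSTREAM		GENMASK(30, 24)

static void en7581_program_eq_presets(void __iomem *base)
{
	u32 val;

	/* lane 0/1: downstream preset 0x47, upstream preset 0x41 */
	val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
	      FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
	writel_relaxed(val, base + PCIE_EQ_PRESET_01_REG);
}
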
diff --git a/target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch
deleted file mode 100644 (file)
index 3f9443e..0000000
+++ /dev/null
@@ -1,1783 +0,0 @@
-From d7d2818b93837def4a33f92da2e64c3a2752c47e Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 15 Jun 2024 23:15:42 +0200
-Subject: [PATCH] phy: airoha: Add PCIe PHY driver for EN7581 SoC.
-
-Introduce support for Airoha PCIe PHY controller available in EN7581
-SoC.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Tested-by: Zhengping Zhang <zhengping.zhang@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20ac99aa8628d97778594f606681db7f868f24fe.1718485860.git.lorenzo@kernel.org
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- MAINTAINERS                        |    8 +
- drivers/phy/Kconfig                |   10 +
- drivers/phy/Makefile               |    1 +
- drivers/phy/phy-airoha-pcie-regs.h |  477 +++++++++++
- drivers/phy/phy-airoha-pcie.c      | 1248 ++++++++++++++++++++++++++++
- 5 files changed, 1744 insertions(+)
- create mode 100644 drivers/phy/phy-airoha-pcie-regs.h
- create mode 100644 drivers/phy/phy-airoha-pcie.c
-
---- a/drivers/phy/Kconfig
-+++ b/drivers/phy/Kconfig
-@@ -72,6 +72,16 @@ config PHY_CAN_TRANSCEIVER
-         functional modes using gpios and sets the attribute max link
-         rate, for CAN drivers.
-+config PHY_AIROHA_PCIE
-+      tristate "Airoha PCIe-PHY Driver"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      depends on OF
-+      select GENERIC_PHY
-+      help
-+        Say Y here to add support for Airoha PCIe PHY driver.
-+        This driver create the basic PHY instance and provides initialize
-+        callback for PCIe GEN3 port.
-+
- source "drivers/phy/allwinner/Kconfig"
- source "drivers/phy/amlogic/Kconfig"
- source "drivers/phy/broadcom/Kconfig"
---- a/drivers/phy/Makefile
-+++ b/drivers/phy/Makefile
-@@ -10,6 +10,7 @@ obj-$(CONFIG_PHY_LPC18XX_USB_OTG)    += phy
- obj-$(CONFIG_PHY_XGENE)                       += phy-xgene.o
- obj-$(CONFIG_PHY_PISTACHIO_USB)               += phy-pistachio-usb.o
- obj-$(CONFIG_USB_LGM_PHY)             += phy-lgm-usb.o
-+obj-$(CONFIG_PHY_AIROHA_PCIE)         += phy-airoha-pcie.o
- obj-y                                 += allwinner/   \
-                                          amlogic/     \
-                                          broadcom/    \
---- /dev/null
-+++ b/drivers/phy/phy-airoha-pcie-regs.h
-@@ -0,0 +1,477 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#ifndef _PHY_AIROHA_PCIE_H
-+#define _PHY_AIROHA_PCIE_H
-+
-+/* CSR_2L */
-+#define REG_CSR_2L_CMN                                0x0000
-+#define CSR_2L_PXP_CMN_LANE_EN                        BIT(0)
-+#define CSR_2L_PXP_CMN_TRIM_MASK              GENMASK(28, 24)
-+
-+#define REG_CSR_2L_JCPLL_IB_EXT                       0x0004
-+#define REG_CSR_2L_JCPLL_LPF_SHCK_EN          BIT(8)
-+#define CSR_2L_PXP_JCPLL_CHP_IBIAS            GENMASK(21, 16)
-+#define CSR_2L_PXP_JCPLL_CHP_IOFST            GENMASK(29, 24)
-+
-+#define REG_CSR_2L_JCPLL_LPF_BR                       0x0008
-+#define CSR_2L_PXP_JCPLL_LPF_BR                       GENMASK(4, 0)
-+#define CSR_2L_PXP_JCPLL_LPF_BC                       GENMASK(12, 8)
-+#define CSR_2L_PXP_JCPLL_LPF_BP                       GENMASK(20, 16)
-+#define CSR_2L_PXP_JCPLL_LPF_BWR              GENMASK(28, 24)
-+
-+#define REG_CSR_2L_JCPLL_LPF_BWC              0x000c
-+#define CSR_2L_PXP_JCPLL_LPF_BWC              GENMASK(4, 0)
-+#define CSR_2L_PXP_JCPLL_KBAND_CODE           GENMASK(23, 16)
-+#define CSR_2L_PXP_JCPLL_KBAND_DIV            GENMASK(26, 24)
-+
-+#define REG_CSR_2L_JCPLL_KBAND_KFC            0x0010
-+#define CSR_2L_PXP_JCPLL_KBAND_KFC            GENMASK(1, 0)
-+#define CSR_2L_PXP_JCPLL_KBAND_KF             GENMASK(9, 8)
-+#define CSR_2L_PXP_JCPLL_KBAND_KS             GENMASK(17, 16)
-+#define CSR_2L_PXP_JCPLL_POSTDIV_EN           BIT(24)
-+
-+#define REG_CSR_2L_JCPLL_MMD_PREDIV_MODE      0x0014
-+#define CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE      GENMASK(1, 0)
-+#define CSR_2L_PXP_JCPLL_POSTDIV_D2           BIT(16)
-+#define CSR_2L_PXP_JCPLL_POSTDIV_D5           BIT(24)
-+
-+#define CSR_2L_PXP_JCPLL_MONCK                        0x0018
-+#define CSR_2L_PXP_JCPLL_REFIN_DIV            GENMASK(25, 24)
-+
-+#define REG_CSR_2L_JCPLL_RST_DLY              0x001c
-+#define CSR_2L_PXP_JCPLL_RST_DLY              GENMASK(2, 0)
-+#define CSR_2L_PXP_JCPLL_RST                  BIT(8)
-+#define CSR_2L_PXP_JCPLL_SDM_DI_EN            BIT(16)
-+#define CSR_2L_PXP_JCPLL_SDM_DI_LS            GENMASK(25, 24)
-+
-+#define REG_CSR_2L_JCPLL_SDM_IFM              0x0020
-+#define CSR_2L_PXP_JCPLL_SDM_IFM              BIT(0)
-+
-+#define REG_CSR_2L_JCPLL_SDM_HREN             0x0024
-+#define CSR_2L_PXP_JCPLL_SDM_HREN             BIT(0)
-+#define CSR_2L_PXP_JCPLL_TCL_AMP_EN           BIT(8)
-+#define CSR_2L_PXP_JCPLL_TCL_AMP_GAIN         GENMASK(18, 16)
-+#define CSR_2L_PXP_JCPLL_TCL_AMP_VREF         GENMASK(28, 24)
-+
-+#define REG_CSR_2L_JCPLL_TCL_CMP              0x0028
-+#define CSR_2L_PXP_JCPLL_TCL_LPF_EN           BIT(16)
-+#define CSR_2L_PXP_JCPLL_TCL_LPF_BW           GENMASK(26, 24)
-+
-+#define REG_CSR_2L_JCPLL_VCODIV                       0x002c
-+#define CSR_2L_PXP_JCPLL_VCO_CFIX             GENMASK(9, 8)
-+#define CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN               BIT(16)
-+#define CSR_2L_PXP_JCPLL_VCO_SCAPWR           GENMASK(26, 24)
-+
-+#define REG_CSR_2L_JCPLL_VCO_TCLVAR           0x0030
-+#define CSR_2L_PXP_JCPLL_VCO_TCLVAR           GENMASK(2, 0)
-+
-+#define REG_CSR_2L_JCPLL_SSC                          0x0038
-+#define CSR_2L_PXP_JCPLL_SSC_EN                       BIT(0)
-+#define CSR_2L_PXP_JCPLL_SSC_PHASE_INI                BIT(8)
-+#define CSR_2L_PXP_JCPLL_SSC_TRI_EN           BIT(16)
-+
-+#define REG_CSR_2L_JCPLL_SSC_DELTA1           0x003c
-+#define CSR_2L_PXP_JCPLL_SSC_DELTA1           GENMASK(15, 0)
-+#define CSR_2L_PXP_JCPLL_SSC_DELTA            GENMASK(31, 16)
-+
-+#define REG_CSR_2L_JCPLL_SSC_PERIOD           0x0040
-+#define CSR_2L_PXP_JCPLL_SSC_PERIOD           GENMASK(15, 0)
-+
-+#define REG_CSR_2L_JCPLL_TCL_VTP_EN           0x004c
-+#define CSR_2L_PXP_JCPLL_SPARE_LOW            GENMASK(31, 24)
-+
-+#define REG_CSR_2L_JCPLL_TCL_KBAND_VREF               0x0050
-+#define CSR_2L_PXP_JCPLL_TCL_KBAND_VREF               GENMASK(4, 0)
-+#define CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN    BIT(24)
-+
-+#define REG_CSR_2L_750M_SYS_CK                        0x0054
-+#define CSR_2L_PXP_TXPLL_LPF_SHCK_EN          BIT(16)
-+#define CSR_2L_PXP_TXPLL_CHP_IBIAS            GENMASK(29, 24)
-+
-+#define REG_CSR_2L_TXPLL_CHP_IOFST            0x0058
-+#define CSR_2L_PXP_TXPLL_CHP_IOFST            GENMASK(5, 0)
-+#define CSR_2L_PXP_TXPLL_LPF_BR                       GENMASK(12, 8)
-+#define CSR_2L_PXP_TXPLL_LPF_BC                       GENMASK(20, 16)
-+#define CSR_2L_PXP_TXPLL_LPF_BP                       GENMASK(28, 24)
-+
-+#define REG_CSR_2L_TXPLL_LPF_BWR              0x005c
-+#define CSR_2L_PXP_TXPLL_LPF_BWR              GENMASK(4, 0)
-+#define CSR_2L_PXP_TXPLL_LPF_BWC              GENMASK(12, 8)
-+#define CSR_2L_PXP_TXPLL_KBAND_CODE           GENMASK(31, 24)
-+
-+#define REG_CSR_2L_TXPLL_KBAND_DIV            0x0060
-+#define CSR_2L_PXP_TXPLL_KBAND_DIV            GENMASK(2, 0)
-+#define CSR_2L_PXP_TXPLL_KBAND_KFC            GENMASK(9, 8)
-+#define CSR_2L_PXP_TXPLL_KBAND_KF             GENMASK(17, 16)
-+#define CSR_2L_PXP_txpll_KBAND_KS             GENMASK(25, 24)
-+
-+#define REG_CSR_2L_TXPLL_POSTDIV              0x0064
-+#define CSR_2L_PXP_TXPLL_POSTDIV_EN           BIT(0)
-+#define CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE      GENMASK(9, 8)
-+#define CSR_2L_PXP_TXPLL_PHY_CK1_EN           BIT(24)
-+
-+#define REG_CSR_2L_TXPLL_PHY_CK2              0x0068
-+#define CSR_2L_PXP_TXPLL_REFIN_INTERNAL               BIT(24)
-+
-+#define REG_CSR_2L_TXPLL_REFIN_DIV            0x006c
-+#define CSR_2L_PXP_TXPLL_REFIN_DIV            GENMASK(1, 0)
-+#define CSR_2L_PXP_TXPLL_RST_DLY              GENMASK(10, 8)
-+#define CSR_2L_PXP_TXPLL_PLL_RSTB             BIT(16)
-+
-+#define REG_CSR_2L_TXPLL_SDM_DI_LS            0x0070
-+#define CSR_2L_PXP_TXPLL_SDM_DI_LS            GENMASK(1, 0)
-+#define CSR_2L_PXP_TXPLL_SDM_IFM              BIT(8)
-+#define CSR_2L_PXP_TXPLL_SDM_ORD              GENMASK(25, 24)
-+
-+#define REG_CSR_2L_TXPLL_SDM_OUT              0x0074
-+#define CSR_2L_PXP_TXPLL_TCL_AMP_EN           BIT(16)
-+#define CSR_2L_PXP_TXPLL_TCL_AMP_GAIN         GENMASK(26, 24)
-+
-+#define REG_CSR_2L_TXPLL_TCL_AMP_VREF         0x0078
-+#define CSR_2L_PXP_TXPLL_TCL_AMP_VREF         GENMASK(4, 0)
-+#define CSR_2L_PXP_TXPLL_TCL_LPF_EN           BIT(24)
-+
-+#define REG_CSR_2L_TXPLL_TCL_LPF_BW           0x007c
-+#define CSR_2L_PXP_TXPLL_TCL_LPF_BW           GENMASK(2, 0)
-+#define CSR_2L_PXP_TXPLL_VCO_CFIX             GENMASK(17, 16)
-+#define CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN               BIT(24)
-+
-+#define REG_CSR_2L_TXPLL_VCO_SCAPWR           0x0080
-+#define CSR_2L_PXP_TXPLL_VCO_SCAPWR           GENMASK(2, 0)
-+
-+#define REG_CSR_2L_TXPLL_SSC                  0x0084
-+#define CSR_2L_PXP_TXPLL_SSC_EN                       BIT(0)
-+#define CSR_2L_PXP_TXPLL_SSC_PHASE_INI                BIT(8)
-+
-+#define REG_CSR_2L_TXPLL_SSC_DELTA1           0x0088
-+#define CSR_2L_PXP_TXPLL_SSC_DELTA1           GENMASK(15, 0)
-+#define CSR_2L_PXP_TXPLL_SSC_DELTA            GENMASK(31, 16)
-+
-+#define REG_CSR_2L_TXPLL_SSC_PERIOD           0x008c
-+#define CSR_2L_PXP_txpll_SSC_PERIOD           GENMASK(15, 0)
-+
-+#define REG_CSR_2L_TXPLL_VTP                  0x0090
-+#define CSR_2L_PXP_TXPLL_VTP_EN                       BIT(0)
-+
-+#define REG_CSR_2L_TXPLL_TCL_VTP              0x0098
-+#define CSR_2L_PXP_TXPLL_SPARE_L              GENMASK(31, 24)
-+
-+#define REG_CSR_2L_TXPLL_TCL_KBAND_VREF               0x009c
-+#define CSR_2L_PXP_TXPLL_TCL_KBAND_VREF               GENMASK(4, 0)
-+#define CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN    BIT(24)
-+
-+#define REG_CSR_2L_TXPLL_POSTDIV_D256         0x00a0
-+#define CSR_2L_PXP_CLKTX0_AMP                 GENMASK(10, 8)
-+#define CSR_2L_PXP_CLKTX0_OFFSET              GENMASK(17, 16)
-+#define CSR_2L_PXP_CLKTX0_SR                  GENMASK(25, 24)
-+
-+#define REG_CSR_2L_CLKTX0_FORCE_OUT1          0x00a4
-+#define CSR_2L_PXP_CLKTX0_HZ                  BIT(8)
-+#define CSR_2L_PXP_CLKTX0_IMP_SEL             GENMASK(20, 16)
-+#define CSR_2L_PXP_CLKTX1_AMP                 GENMASK(26, 24)
-+
-+#define REG_CSR_2L_CLKTX1_OFFSET              0x00a8
-+#define CSR_2L_PXP_CLKTX1_OFFSET              GENMASK(1, 0)
-+#define CSR_2L_PXP_CLKTX1_SR                  GENMASK(9, 8)
-+#define CSR_2L_PXP_CLKTX1_HZ                  BIT(24)
-+
-+#define REG_CSR_2L_CLKTX1_IMP_SEL             0x00ac
-+#define CSR_2L_PXP_CLKTX1_IMP_SEL             GENMASK(4, 0)
-+
-+#define REG_CSR_2L_PLL_CMN_RESERVE0           0x00b0
-+#define CSR_2L_PXP_PLL_RESERVE_MASK           GENMASK(15, 0)
-+
-+#define REG_CSR_2L_TX0_CKLDO                  0x00cc
-+#define CSR_2L_PXP_TX0_CKLDO_EN                       BIT(0)
-+#define CSR_2L_PXP_TX0_DMEDGEGEN_EN           BIT(24)
-+
-+#define REG_CSR_2L_TX1_CKLDO                  0x00e8
-+#define CSR_2L_PXP_TX1_CKLDO_EN                       BIT(0)
-+#define CSR_2L_PXP_TX1_DMEDGEGEN_EN           BIT(24)
-+
-+#define REG_CSR_2L_TX1_MULTLANE                       0x00ec
-+#define CSR_2L_PXP_TX1_MULTLANE_EN            BIT(0)
-+
-+#define REG_CSR_2L_RX0_REV0                   0x00fc
-+#define CSR_2L_PXP_VOS_PNINV                  GENMASK(3, 2)
-+#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE                GENMASK(6, 4)
-+#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE         GENMASK(10, 8)
-+
-+#define REG_CSR_2L_RX0_PHYCK_DIV              0x0100
-+#define CSR_2L_PXP_RX0_PHYCK_SEL              GENMASK(9, 8)
-+#define CSR_2L_PXP_RX0_PHYCK_RSTB             BIT(16)
-+#define CSR_2L_PXP_RX0_TDC_CK_SEL             BIT(24)
-+
-+#define REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV     0x0104
-+#define CSR_2L_PXP_CDR0_PD_EDGE_DISABLE               BIT(8)
-+
-+#define REG_CSR_2L_CDR0_LPF_RATIO             0x0110
-+#define CSR_2L_PXP_CDR0_LPF_TOP_LIM           GENMASK(26, 8)
-+
-+#define REG_CSR_2L_CDR0_PR_INJ_MODE           0x011c
-+#define CSR_2L_PXP_CDR0_INJ_FORCE_OFF         BIT(24)
-+
-+#define REG_CSR_2L_CDR0_PR_BETA_DAC           0x0120
-+#define CSR_2L_PXP_CDR0_PR_BETA_SEL           GENMASK(19, 16)
-+#define CSR_2L_PXP_CDR0_PR_KBAND_DIV          GENMASK(26, 24)
-+
-+#define REG_CSR_2L_CDR0_PR_VREG_IBAND         0x0124
-+#define CSR_2L_PXP_CDR0_PR_VREG_IBAND         GENMASK(2, 0)
-+#define CSR_2L_PXP_CDR0_PR_VREG_CKBUF         GENMASK(10, 8)
-+
-+#define REG_CSR_2L_CDR0_PR_CKREF_DIV          0x0128
-+#define CSR_2L_PXP_CDR0_PR_CKREF_DIV          GENMASK(1, 0)
-+
-+#define REG_CSR_2L_CDR0_PR_MONCK              0x012c
-+#define CSR_2L_PXP_CDR0_PR_MONCK_ENABLE               BIT(0)
-+#define CSR_2L_PXP_CDR0_PR_RESERVE0           GENMASK(19, 16)
-+
-+#define REG_CSR_2L_CDR0_PR_COR_HBW            0x0130
-+#define CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON               BIT(8)
-+#define CSR_2L_PXP_CDR0_PR_CKREF_DIV1         GENMASK(17, 16)
-+
-+#define REG_CSR_2L_CDR0_PR_MONPI              0x0134
-+#define CSR_2L_PXP_CDR0_PR_XFICK_EN           BIT(8)
-+
-+#define REG_CSR_2L_RX0_SIGDET_DCTEST          0x0140
-+#define CSR_2L_PXP_RX0_SIGDET_LPF_CTRL                GENMASK(9, 8)
-+#define CSR_2L_PXP_RX0_SIGDET_PEAK            GENMASK(25, 24)
-+
-+#define REG_CSR_2L_RX0_SIGDET_VTH_SEL         0x0144
-+#define CSR_2L_PXP_RX0_SIGDET_VTH_SEL         GENMASK(4, 0)
-+#define CSR_2L_PXP_RX0_FE_VB_EQ1_EN           BIT(24)
-+
-+#define REG_CSR_2L_PXP_RX0_FE_VB_EQ2          0x0148
-+#define CSR_2L_PXP_RX0_FE_VB_EQ2_EN           BIT(0)
-+#define CSR_2L_PXP_RX0_FE_VB_EQ3_EN           BIT(8)
-+#define CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB                BIT(16)
-+
-+#define REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS     0x0158
-+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS               GENMASK(29, 24)
-+
-+#define REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS               0x015c
-+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS               GENMASK(5, 0)
-+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS               GENMASK(13, 8)
-+
-+#define REG_CSR_2L_RX1_REV0                   0x01b4
-+
-+#define REG_CSR_2L_RX1_PHYCK_DIV              0x01b8
-+#define CSR_2L_PXP_RX1_PHYCK_SEL              GENMASK(9, 8)
-+#define CSR_2L_PXP_RX1_PHYCK_RSTB             BIT(16)
-+#define CSR_2L_PXP_RX1_TDC_CK_SEL             BIT(24)
-+
-+#define REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV     0x01bc
-+#define CSR_2L_PXP_CDR1_PD_EDGE_DISABLE               BIT(8)
-+
-+#define REG_CSR_2L_CDR1_PR_BETA_DAC           0x01d8
-+#define CSR_2L_PXP_CDR1_PR_BETA_SEL           GENMASK(19, 16)
-+#define CSR_2L_PXP_CDR1_PR_KBAND_DIV          GENMASK(26, 24)
-+
-+#define REG_CSR_2L_CDR1_PR_MONCK              0x01e4
-+#define CSR_2L_PXP_CDR1_PR_MONCK_ENABLE               BIT(0)
-+#define CSR_2L_PXP_CDR1_PR_RESERVE0           GENMASK(19, 16)
-+
-+#define REG_CSR_2L_CDR1_LPF_RATIO             0x01c8
-+#define CSR_2L_PXP_CDR1_LPF_TOP_LIM           GENMASK(26, 8)
-+
-+#define REG_CSR_2L_CDR1_PR_INJ_MODE           0x01d4
-+#define CSR_2L_PXP_CDR1_INJ_FORCE_OFF         BIT(24)
-+
-+#define REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL     0x01dc
-+#define CSR_2L_PXP_CDR1_PR_VREG_IBAND         GENMASK(2, 0)
-+#define CSR_2L_PXP_CDR1_PR_VREG_CKBUF         GENMASK(10, 8)
-+
-+#define REG_CSR_2L_CDR1_PR_CKREF_DIV          0x01e0
-+#define CSR_2L_PXP_CDR1_PR_CKREF_DIV          GENMASK(1, 0)
-+
-+#define REG_CSR_2L_CDR1_PR_COR_HBW            0x01e8
-+#define CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON               BIT(8)
-+#define CSR_2L_PXP_CDR1_PR_CKREF_DIV1         GENMASK(17, 16)
-+
-+#define REG_CSR_2L_CDR1_PR_MONPI              0x01ec
-+#define CSR_2L_PXP_CDR1_PR_XFICK_EN           BIT(8)
-+
-+#define REG_CSR_2L_RX1_DAC_RANGE_EYE          0x01f4
-+#define CSR_2L_PXP_RX1_SIGDET_LPF_CTRL                GENMASK(25, 24)
-+
-+#define REG_CSR_2L_RX1_SIGDET_NOVTH           0x01f8
-+#define CSR_2L_PXP_RX1_SIGDET_PEAK            GENMASK(9, 8)
-+#define CSR_2L_PXP_RX1_SIGDET_VTH_SEL         GENMASK(20, 16)
-+
-+#define REG_CSR_2L_RX1_FE_VB_EQ1              0x0200
-+#define CSR_2L_PXP_RX1_FE_VB_EQ1_EN           BIT(0)
-+#define CSR_2L_PXP_RX1_FE_VB_EQ2_EN           BIT(8)
-+#define CSR_2L_PXP_RX1_FE_VB_EQ3_EN           BIT(16)
-+#define CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB                BIT(24)
-+
-+#define REG_CSR_2L_RX1_OSCAL_VGA1IOS          0x0214
-+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS               GENMASK(5, 0)
-+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS               GENMASK(13, 8)
-+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS               GENMASK(21, 16)
-+
-+/* PMA */
-+#define REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1 0x0004
-+#define PCIE_LCPLL_MAN_PWDB                   BIT(0)
-+
-+#define REG_PCIE_PMA_SEQUENCE_DISB_CTRL1      0x010c
-+#define PCIE_DISB_RX_SDCAL_EN                 BIT(0)
-+
-+#define REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1        0x0114
-+#define PCIE_FORCE_RX_SDCAL_EN                        BIT(0)
-+
-+#define REG_PCIE_PMA_SS_RX_FREQ_DET1          0x014c
-+#define PCIE_PLL_FT_LOCK_CYCLECNT             GENMASK(15, 0)
-+#define PCIE_PLL_FT_UNLOCK_CYCLECNT           GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_SS_RX_FREQ_DET2          0x0150
-+#define PCIE_LOCK_TARGET_BEG                  GENMASK(15, 0)
-+#define PCIE_LOCK_TARGET_END                  GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_SS_RX_FREQ_DET3          0x0154
-+#define PCIE_UNLOCK_TARGET_BEG                        GENMASK(15, 0)
-+#define PCIE_UNLOCK_TARGET_END                        GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_SS_RX_FREQ_DET4          0x0158
-+#define PCIE_FREQLOCK_DET_EN                  GENMASK(2, 0)
-+#define PCIE_LOCK_LOCKTH                      GENMASK(11, 8)
-+#define PCIE_UNLOCK_LOCKTH                    GENMASK(15, 12)
-+
-+#define REG_PCIE_PMA_SS_RX_CAL1                       0x0160
-+#define REG_PCIE_PMA_SS_RX_CAL2                       0x0164
-+#define PCIE_CAL_OUT_OS                               GENMASK(11, 8)
-+
-+#define REG_PCIE_PMA_SS_RX_SIGDET0            0x0168
-+#define PCIE_SIGDET_WIN_NONVLD_TIMES          GENMASK(28, 24)
-+
-+#define REG_PCIE_PMA_TX_RESET                 0x0260
-+#define PCIE_TX_TOP_RST                               BIT(0)
-+#define PCIE_TX_CAL_RST                               BIT(8)
-+
-+#define REG_PCIE_PMA_RX_FORCE_MODE0           0x0294
-+#define PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL    GENMASK(1, 0)
-+
-+#define REG_PCIE_PMA_SS_DA_XPON_PWDB0         0x034c
-+#define PCIE_DA_XPON_CDR_PR_PWDB              BIT(8)
-+
-+#define REG_PCIE_PMA_SW_RESET                 0x0460
-+#define PCIE_SW_RX_FIFO_RST                   BIT(0)
-+#define PCIE_SW_RX_RST                                BIT(1)
-+#define PCIE_SW_TX_RST                                BIT(2)
-+#define PCIE_SW_PMA_RST                               BIT(3)
-+#define PCIE_SW_ALLPCS_RST                    BIT(4)
-+#define PCIE_SW_REF_RST                               BIT(5)
-+#define PCIE_SW_TX_FIFO_RST                   BIT(6)
-+#define PCIE_SW_XFI_TXPCS_RST                 BIT(7)
-+#define PCIE_SW_XFI_RXPCS_RST                 BIT(8)
-+#define PCIE_SW_XFI_RXPCS_BIST_RST            BIT(9)
-+#define PCIE_SW_HSG_TXPCS_RST                 BIT(10)
-+#define PCIE_SW_HSG_RXPCS_RST                 BIT(11)
-+#define PCIE_PMA_SW_RST                               (PCIE_SW_RX_FIFO_RST | \
-+                                               PCIE_SW_RX_RST | \
-+                                               PCIE_SW_TX_RST | \
-+                                               PCIE_SW_PMA_RST | \
-+                                               PCIE_SW_ALLPCS_RST | \
-+                                               PCIE_SW_REF_RST | \
-+                                               PCIE_SW_TX_FIFO_RST | \
-+                                               PCIE_SW_XFI_TXPCS_RST | \
-+                                               PCIE_SW_XFI_RXPCS_RST | \
-+                                               PCIE_SW_XFI_RXPCS_BIST_RST | \
-+                                               PCIE_SW_HSG_TXPCS_RST | \
-+                                               PCIE_SW_HSG_RXPCS_RST)
-+
-+#define REG_PCIE_PMA_RO_RX_FREQDET            0x0530
-+#define PCIE_RO_FBCK_LOCK                     BIT(0)
-+#define PCIE_RO_FL_OUT                                GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC 0x0794
-+#define PCIE_FORCE_DA_PXP_CDR_PR_IDAC         GENMASK(10, 0)
-+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC     BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW   BIT(24)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW       0x0798
-+#define PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW               GENMASK(30, 0)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS   0x079c
-+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW   BIT(16)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW       0x0800
-+#define PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW               GENMASK(30, 0)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB 0x081c
-+#define PCIE_FORCE_DA_PXP_CDR_PD_PWDB         BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB     BIT(8)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C        0x0820
-+#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN     BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN BIT(8)
-+#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN     BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN BIT(24)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB   0x0824
-+#define PCIE_FORCE_DA_PXP_CDR_PR_PWDB                 BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB             BIT(24)
-+
-+#define REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT    0x0828
-+#define PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN      BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN  BIT(8)
-+#define PCIE_FORCE_DA_PXP_JCPLL_EN            BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_EN                BIT(24)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST 0x0084c
-+#define PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB      BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB  BIT(24)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT 0x0854
-+#define PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN      BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN  BIT(8)
-+#define PCIE_FORCE_DA_PXP_TXPLL_EN            BIT(16)
-+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_EN                BIT(24)
-+
-+#define REG_PCIE_PMA_SCAN_MODE                                0x0884
-+#define PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN         BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN     BIT(8)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_13           0x08bc
-+#define PCIE_FLL_IDAC_PCIEG1                  GENMASK(10, 0)
-+#define PCIE_FLL_IDAC_PCIEG2                  GENMASK(26, 16)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_14           0x08c0
-+#define PCIE_FLL_IDAC_PCIEG3                  GENMASK(10, 0)
-+#define PCIE_FLL_LOAD_EN                      BIT(16)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL     0x088c
-+#define PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL             GENMASK(1, 0)
-+#define PCIE_FORCE_SEL_DA_PXP_RX_FE_GAIN_CTRL         BIT(8)
-+
-+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB  0x0894
-+#define PCIE_FORCE_DA_PXP_RX_FE_PWDB          BIT(0)
-+#define PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB      BIT(8)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_12           0x08b8
-+#define PCIE_FORCE_PMA_RX_SPEED                       GENMASK(7, 4)
-+#define PCIE_FORCE_SEL_PMA_RX_SPEED           BIT(7)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_17           0x08e0
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_18           0x08e4
-+#define PCIE_PXP_RX_VTH_SEL_PCIE_G1           GENMASK(4, 0)
-+#define PCIE_PXP_RX_VTH_SEL_PCIE_G2           GENMASK(12, 8)
-+#define PCIE_PXP_RX_VTH_SEL_PCIE_G3           GENMASK(20, 16)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_19           0x08e8
-+#define PCIE_PCP_RX_REV0_PCIE_GEN1            GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_20           0x08ec
-+#define PCIE_PCP_RX_REV0_PCIE_GEN2            GENMASK(15, 0)
-+#define PCIE_PCP_RX_REV0_PCIE_GEN3            GENMASK(31, 16)
-+
-+#define REG_PCIE_PMA_DIG_RESERVE_21           0x08f0
-+#define REG_PCIE_PMA_DIG_RESERVE_22           0x08f4
-+#define REG_PCIE_PMA_DIG_RESERVE_27           0x0908
-+#define REG_PCIE_PMA_DIG_RESERVE_30           0x0914
-+
-+#endif /* _PHY_AIROHA_PCIE_H */
---- /dev/null
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -0,0 +1,1248 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/delay.h>
-+#include <linux/io.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/phy/phy.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+
-+#include "phy-airoha-pcie-regs.h"
-+
-+#define LEQ_LEN_CTRL_MAX_VAL  7
-+#define FREQ_LOCK_MAX_ATTEMPT 10
-+
-+enum airoha_pcie_port_gen {
-+      PCIE_PORT_GEN1 = 1,
-+      PCIE_PORT_GEN2,
-+      PCIE_PORT_GEN3,
-+};
-+
-+/**
-+ * struct airoha_pcie_phy - PCIe phy driver main structure
-+ * @dev: pointer to device
-+ * @phy: pointer to generic phy
-+ * @csr_2l: Analogic lane IO mapped register base address
-+ * @pma0: IO mapped register base address of PMA0-PCIe
-+ * @pma1: IO mapped register base address of PMA1-PCIe
-+ */
-+struct airoha_pcie_phy {
-+      struct device *dev;
-+      struct phy *phy;
-+      void __iomem *csr_2l;
-+      void __iomem *pma0;
-+      void __iomem *pma1;
-+};
-+
-+static void airoha_phy_clear_bits(void __iomem *reg, u32 mask)
-+{
-+      u32 val = readl(reg) & ~mask;
-+
-+      writel(val, reg);
-+}
-+
-+static void airoha_phy_set_bits(void __iomem *reg, u32 mask)
-+{
-+      u32 val = readl(reg) | mask;
-+
-+      writel(val, reg);
-+}
-+
-+static void airoha_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
-+{
-+      u32 tmp = readl(reg);
-+
-+      tmp &= ~mask;
-+      tmp |= val & mask;
-+      writel(tmp, reg);
-+}
-+
-+#define airoha_phy_update_field(reg, mask, val)                                       \
-+      do {                                                                    \
-+              BUILD_BUG_ON_MSG(!__builtin_constant_p((mask)),                 \
-+                               "mask is not constant");                       \
-+              airoha_phy_update_bits((reg), (mask),                           \
-+                                     FIELD_PREP((mask), (val)));              \
-+      } while (0)
-+
-+#define airoha_phy_csr_2l_clear_bits(pcie_phy, reg, mask)                     \
-+      airoha_phy_clear_bits((pcie_phy)->csr_2l + (reg), (mask))
-+#define airoha_phy_csr_2l_set_bits(pcie_phy, reg, mask)                               \
-+      airoha_phy_set_bits((pcie_phy)->csr_2l + (reg), (mask))
-+#define airoha_phy_csr_2l_update_field(pcie_phy, reg, mask, val)              \
-+      airoha_phy_update_field((pcie_phy)->csr_2l + (reg), (mask), (val))
-+#define airoha_phy_pma0_clear_bits(pcie_phy, reg, mask)                               \
-+      airoha_phy_clear_bits((pcie_phy)->pma0 + (reg), (mask))
-+#define airoha_phy_pma1_clear_bits(pcie_phy, reg, mask)                               \
-+      airoha_phy_clear_bits((pcie_phy)->pma1 + (reg), (mask))
-+#define airoha_phy_pma0_set_bits(pcie_phy, reg, mask)                         \
-+      airoha_phy_set_bits((pcie_phy)->pma0 + (reg), (mask))
-+#define airoha_phy_pma1_set_bits(pcie_phy, reg, mask)                         \
-+      airoha_phy_set_bits((pcie_phy)->pma1 + (reg), (mask))
-+#define airoha_phy_pma0_update_field(pcie_phy, reg, mask, val)                        \
-+      airoha_phy_update_field((pcie_phy)->pma0 + (reg), (mask), (val))
-+#define airoha_phy_pma1_update_field(pcie_phy, reg, mask, val)                        \
-+      airoha_phy_update_field((pcie_phy)->pma1 + (reg), (mask), (val))
-+
-+static void
-+airoha_phy_init_lane0_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
-+                                    enum airoha_pcie_port_gen gen)
-+{
-+      u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
-+      u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
-+      u32 pr_idac, val, cdr_pr_idac_tmp = 0;
-+      int i;
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
-+                               PCIE_LCPLL_MAN_PWDB);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
-+                                   PCIE_LOCK_TARGET_BEG,
-+                                   fl_out_target - 100);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
-+                                   PCIE_LOCK_TARGET_END,
-+                                   fl_out_target + 100);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
-+                                   PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                   PCIE_LOCK_LOCKTH, 0x3);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
-+                                   PCIE_UNLOCK_TARGET_BEG,
-+                                   fl_out_target - 100);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
-+                                   PCIE_UNLOCK_TARGET_END,
-+                                   fl_out_target + 100);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
-+                                   PCIE_PLL_FT_UNLOCK_CYCLECNT,
-+                                   lock_cyclecnt);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                   PCIE_UNLOCK_LOCKTH, 0x3);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
-+                                 CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                                 PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                               PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
-+
-+      for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                              REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                              PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
-+              airoha_phy_pma0_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = FIELD_GET(PCIE_RO_FL_OUT,
-+                              readl(pcie_phy->pma0 +
-+                                    REG_PCIE_PMA_RO_RX_FREQDET));
-+              if (val > fl_out_target)
-+                      cdr_pr_idac_tmp = i << 8;
-+      }
-+
-+      for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
-+              pr_idac = cdr_pr_idac_tmp | (0x1 << i);
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                              REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                              PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
-+              airoha_phy_pma0_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = FIELD_GET(PCIE_RO_FL_OUT,
-+                              readl(pcie_phy->pma0 +
-+                                    REG_PCIE_PMA_RO_RX_FREQDET));
-+              if (val < fl_out_target)
-+                      pr_idac &= ~(0x1 << i);
-+
-+              cdr_pr_idac_tmp = pr_idac;
-+      }
-+
-+      airoha_phy_pma0_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                   PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                   cdr_pr_idac_tmp);
-+
-+      for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
-+              u32 val;
-+
-+              airoha_phy_pma0_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = readl(pcie_phy->pma0 + REG_PCIE_PMA_RO_RX_FREQDET);
-+              if (val & PCIE_RO_FBCK_LOCK)
-+                      break;
-+      }
-+
-+      /* turn off force mode and update band values */
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
-+                                   CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
-+
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
-+      if (gen == PCIE_PORT_GEN3) {
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_14,
-+                                           PCIE_FLL_IDAC_PCIEG3,
-+                                           cdr_pr_idac_tmp);
-+      } else {
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_13,
-+                                           PCIE_FLL_IDAC_PCIEG1,
-+                                           cdr_pr_idac_tmp);
-+              airoha_phy_pma0_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_13,
-+                                           PCIE_FLL_IDAC_PCIEG2,
-+                                           cdr_pr_idac_tmp);
-+      }
-+}
-+
-+static void
-+airoha_phy_init_lane1_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
-+                                    enum airoha_pcie_port_gen gen)
-+{
-+      u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
-+      u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
-+      u32 pr_idac, val, cdr_pr_idac_tmp = 0;
-+      int i;
-+
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
-+                               PCIE_LCPLL_MAN_PWDB);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
-+                                   PCIE_LOCK_TARGET_BEG,
-+                                   fl_out_target - 100);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
-+                                   PCIE_LOCK_TARGET_END,
-+                                   fl_out_target + 100);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
-+                                   PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                   PCIE_LOCK_LOCKTH, 0x3);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
-+                                   PCIE_UNLOCK_TARGET_BEG,
-+                                   fl_out_target - 100);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
-+                                   PCIE_UNLOCK_TARGET_END,
-+                                   fl_out_target + 100);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
-+                                   PCIE_PLL_FT_UNLOCK_CYCLECNT,
-+                                   lock_cyclecnt);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                   PCIE_UNLOCK_LOCKTH, 0x3);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
-+                                 CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
-+
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                                 PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                               PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
-+
-+      for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                              REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                              PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
-+              airoha_phy_pma1_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = FIELD_GET(PCIE_RO_FL_OUT,
-+                              readl(pcie_phy->pma1 +
-+                                    REG_PCIE_PMA_RO_RX_FREQDET));
-+              if (val > fl_out_target)
-+                      cdr_pr_idac_tmp = i << 8;
-+      }
-+
-+      for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
-+              pr_idac = cdr_pr_idac_tmp | (0x1 << i);
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                              REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                              PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
-+              airoha_phy_pma1_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = FIELD_GET(PCIE_RO_FL_OUT,
-+                              readl(pcie_phy->pma1 +
-+                                    REG_PCIE_PMA_RO_RX_FREQDET));
-+              if (val < fl_out_target)
-+                      pr_idac &= ~(0x1 << i);
-+
-+              cdr_pr_idac_tmp = pr_idac;
-+      }
-+
-+      airoha_phy_pma1_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                   PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                   cdr_pr_idac_tmp);
-+
-+      for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
-+              u32 val;
-+
-+              airoha_phy_pma1_clear_bits(pcie_phy,
-+                                         REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                         PCIE_FREQLOCK_DET_EN);
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_SS_RX_FREQ_DET4,
-+                                           PCIE_FREQLOCK_DET_EN, 0x3);
-+
-+              usleep_range(10000, 15000);
-+
-+              val = readl(pcie_phy->pma1 + REG_PCIE_PMA_RO_RX_FREQDET);
-+              if (val & PCIE_RO_FBCK_LOCK)
-+                      break;
-+      }
-+
-+      /* turn off force mode and update band values */
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
-+                                   CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
-+
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                                 PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
-+      if (gen == PCIE_PORT_GEN3) {
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_14,
-+                                           PCIE_FLL_IDAC_PCIEG3,
-+                                           cdr_pr_idac_tmp);
-+      } else {
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_13,
-+                                           PCIE_FLL_IDAC_PCIEG1,
-+                                           cdr_pr_idac_tmp);
-+              airoha_phy_pma1_update_field(pcie_phy,
-+                                           REG_PCIE_PMA_DIG_RESERVE_13,
-+                                           PCIE_FLL_IDAC_PCIEG2,
-+                                           cdr_pr_idac_tmp);
-+      }
-+}
-+
-+static void airoha_pcie_phy_init_default(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CMN,
-+                                     CSR_2L_PXP_CMN_TRIM_MASK, 0x10);
-+      writel(0xcccbcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_21);
-+      writel(0xcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_22);
-+      writel(0xcccbcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_21);
-+      writel(0xcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_22);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CMN,
-+                                 CSR_2L_PXP_CMN_LANE_EN);
-+}
-+
-+static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_TXPLL_POSTDIV_D256,
-+                                     CSR_2L_PXP_CLKTX0_AMP, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_CLKTX0_FORCE_OUT1,
-+                                     CSR_2L_PXP_CLKTX1_AMP, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_TXPLL_POSTDIV_D256,
-+                                     CSR_2L_PXP_CLKTX0_OFFSET, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
-+                                     CSR_2L_PXP_CLKTX1_OFFSET, 0x2);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX0_FORCE_OUT1,
-+                                   CSR_2L_PXP_CLKTX0_HZ);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
-+                                   CSR_2L_PXP_CLKTX1_HZ);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_CLKTX0_FORCE_OUT1,
-+                                     CSR_2L_PXP_CLKTX0_IMP_SEL, 0x12);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_IMP_SEL,
-+                                     CSR_2L_PXP_CLKTX1_IMP_SEL, 0x12);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV_D256,
-+                                   CSR_2L_PXP_CLKTX0_SR);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
-+                                   CSR_2L_PXP_CLKTX1_SR);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
-+                                     CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
-+}
-+
-+static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                               PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
-+                               PCIE_SW_RX_RST);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                               PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
-+                               PCIE_SW_RX_RST);
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
-+                               PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
-+                               PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
-+}
-+
-+static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
-+{
-+      writel(0x2a00090b, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_17);
-+      writel(0x2a00090b, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_17);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONPI,
-+                                 CSR_2L_PXP_CDR0_PR_XFICK_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONPI,
-+                                 CSR_2L_PXP_CDR1_PR_XFICK_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy,
-+                                   REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV,
-+                                   CSR_2L_PXP_CDR0_PD_EDGE_DISABLE);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy,
-+                                   REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV,
-+                                   CSR_2L_PXP_CDR1_PD_EDGE_DISABLE);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
-+                                     CSR_2L_PXP_RX0_PHYCK_SEL, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
-+                                     CSR_2L_PXP_RX1_PHYCK_SEL, 0x1);
-+}
-+
-+static void airoha_pcie_phy_init_jcpll(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                                 PCIE_FORCE_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                                 PCIE_FORCE_DA_PXP_JCPLL_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_VTP_EN,
-+                                     CSR_2L_PXP_JCPLL_SPARE_LOW, 0x20);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
-+                                 CSR_2L_PXP_JCPLL_RST);
-+      writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_JCPLL_SSC_DELTA1);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
-+                                   CSR_2L_PXP_JCPLL_SSC_PERIOD);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                   CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                   CSR_2L_PXP_JCPLL_SSC_TRI_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
-+                                     CSR_2L_PXP_JCPLL_LPF_BR, 0xa);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
-+                                     CSR_2L_PXP_JCPLL_LPF_BP, 0xc);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
-+                                     CSR_2L_PXP_JCPLL_LPF_BC, 0x1f);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
-+                                     CSR_2L_PXP_JCPLL_LPF_BWC, 0x1e);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
-+                                     CSR_2L_PXP_JCPLL_LPF_BWR, 0xa);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
-+                                     CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
-+                                     0x1);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, CSR_2L_PXP_JCPLL_MONCK,
-+                                   CSR_2L_PXP_JCPLL_REFIN_DIV);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
-+      airoha_phy_pma0_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
-+                                   PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
-+                                   0x50000000);
-+      airoha_phy_pma1_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
-+                                   PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
-+                                   0x50000000);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy,
-+                                 REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
-+                                 CSR_2L_PXP_JCPLL_POSTDIV_D5);
-+      airoha_phy_csr_2l_set_bits(pcie_phy,
-+                                 REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
-+                                 CSR_2L_PXP_JCPLL_POSTDIV_D2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
-+                                     CSR_2L_PXP_JCPLL_RST_DLY, 0x4);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
-+                                   CSR_2L_PXP_JCPLL_SDM_DI_LS);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
-+                                   CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
-+                                   CSR_2L_PXP_JCPLL_CHP_IOFST);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
-+                                     CSR_2L_PXP_JCPLL_CHP_IBIAS, 0xc);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
-+                                     CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
-+                                     0x1);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
-+                                 CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
-+                                     CSR_2L_PXP_JCPLL_VCO_CFIX, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
-+                                     CSR_2L_PXP_JCPLL_VCO_SCAPWR, 0x4);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
-+                                   REG_CSR_2L_JCPLL_LPF_SHCK_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
-+                                 CSR_2L_PXP_JCPLL_POSTDIV_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
-+                                   CSR_2L_PXP_JCPLL_KBAND_KFC);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
-+                                     CSR_2L_PXP_JCPLL_KBAND_KF, 0x3);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
-+                                   CSR_2L_PXP_JCPLL_KBAND_KS);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
-+                                     CSR_2L_PXP_JCPLL_KBAND_DIV, 0x1);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
-+                                 PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
-+                                     CSR_2L_PXP_JCPLL_KBAND_CODE, 0xe4);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
-+                                 CSR_2L_PXP_JCPLL_TCL_AMP_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
-+                                 CSR_2L_PXP_JCPLL_TCL_LPF_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
-+                                     CSR_2L_PXP_JCPLL_TCL_KBAND_VREF, 0xf);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
-+                                     CSR_2L_PXP_JCPLL_TCL_AMP_GAIN, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
-+                                     CSR_2L_PXP_JCPLL_TCL_AMP_VREF, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
-+                                     CSR_2L_PXP_JCPLL_TCL_LPF_BW, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCO_TCLVAR,
-+                                     CSR_2L_PXP_JCPLL_VCO_TCLVAR, 0x3);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_JCPLL_EN);
-+}
-+
-+static void airoha_pcie_phy_txpll(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                                 PCIE_FORCE_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                                 PCIE_FORCE_DA_PXP_TXPLL_EN);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
-+                                 CSR_2L_PXP_TXPLL_PLL_RSTB);
-+      writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC_PERIOD,
-+                                   CSR_2L_PXP_txpll_SSC_PERIOD);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
-+                                     CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
-+                                     CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
-+                                   CSR_2L_PXP_TXPLL_REFIN_DIV);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
-+                                     CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
-+      airoha_phy_pma0_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
-+                                   PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
-+                                   0xc800000);
-+      airoha_phy_pma1_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
-+                                   PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
-+                                   0xc800000);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
-+                                   CSR_2L_PXP_TXPLL_SDM_IFM);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
-+                                   CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
-+                                     CSR_2L_PXP_TXPLL_RST_DLY, 0x4);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
-+                                   CSR_2L_PXP_TXPLL_SDM_DI_LS);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
-+                                     CSR_2L_PXP_TXPLL_SDM_ORD, 0x3);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
-+                                   CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
-+      writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
-+                                     CSR_2L_PXP_TXPLL_LPF_BP, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
-+                                     CSR_2L_PXP_TXPLL_LPF_BC, 0x18);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
-+                                     CSR_2L_PXP_TXPLL_LPF_BR, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
-+                                     CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
-+                                     CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_VTP,
-+                                     CSR_2L_PXP_TXPLL_SPARE_L, 0x1);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
-+                                   CSR_2L_PXP_TXPLL_LPF_BWC);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
-+                                   CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
-+                                   CSR_2L_PXP_TXPLL_REFIN_DIV);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
-+                                 CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_VCO_SCAPWR,
-+                                     CSR_2L_PXP_TXPLL_VCO_SCAPWR, 0x7);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
-+                                     CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
-+                                   CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
-+                                   CSR_2L_PXP_TXPLL_LPF_BWR);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
-+                                 CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
-+                                   CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_VTP,
-+                                   CSR_2L_PXP_TXPLL_VTP_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
-+                                   CSR_2L_PXP_TXPLL_PHY_CK1_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
-+                                 CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
-+                                   CSR_2L_PXP_TXPLL_SSC_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_750M_SYS_CK,
-+                                   CSR_2L_PXP_TXPLL_LPF_SHCK_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
-+                                   CSR_2L_PXP_TXPLL_POSTDIV_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
-+                                   CSR_2L_PXP_TXPLL_KBAND_KFC);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
-+                                     CSR_2L_PXP_TXPLL_KBAND_KF, 0x3);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
-+                                     CSR_2L_PXP_txpll_KBAND_KS, 0x1);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
-+                                     CSR_2L_PXP_TXPLL_KBAND_DIV, 0x4);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
-+                                     CSR_2L_PXP_TXPLL_KBAND_CODE, 0xe4);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
-+                                 CSR_2L_PXP_TXPLL_TCL_AMP_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_AMP_VREF,
-+                                 CSR_2L_PXP_TXPLL_TCL_LPF_EN);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
-+                                     CSR_2L_PXP_TXPLL_TCL_KBAND_VREF, 0xf);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
-+                                     CSR_2L_PXP_TXPLL_TCL_AMP_GAIN, 0x3);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_TXPLL_TCL_AMP_VREF,
-+                                     CSR_2L_PXP_TXPLL_TCL_AMP_VREF, 0xb);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
-+                                     CSR_2L_PXP_TXPLL_TCL_LPF_BW, 0x3);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
-+                               PCIE_FORCE_DA_PXP_TXPLL_EN);
-+}
-+
-+static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
-+                                     CSR_2L_PXP_JCPLL_SSC_DELTA1, 0x106);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
-+                                     CSR_2L_PXP_JCPLL_SSC_DELTA, 0x106);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
-+                                     CSR_2L_PXP_JCPLL_SSC_PERIOD, 0x31b);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                 CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                 CSR_2L_PXP_JCPLL_SSC_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
-+                                 CSR_2L_PXP_JCPLL_SDM_IFM);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
-+                                 REG_CSR_2L_JCPLL_SDM_HREN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
-+                                   CSR_2L_PXP_JCPLL_SDM_DI_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                 CSR_2L_PXP_JCPLL_SSC_TRI_EN);
-+}
-+
-+static void
-+airoha_pcie_phy_set_rxlan0_signal_detect(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
-+                                 CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON);
-+
-+      usleep_range(100, 200);
-+
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
-+                                     CSR_2L_PXP_RX0_SIGDET_PEAK, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
-+                                     CSR_2L_PXP_RX0_SIGDET_VTH_SEL, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
-+                                     CSR_2L_PXP_VOS_PNINV, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
-+                                     CSR_2L_PXP_RX0_SIGDET_LPF_CTRL, 0x1);
-+
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
-+                                   PCIE_CAL_OUT_OS, 0x0);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
-+                                 CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
-+                               PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
-+      airoha_phy_pma0_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
-+                                   PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
-+                                   PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
-+                                   PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
-+      airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
-+                                 PCIE_DISB_RX_SDCAL_EN);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
-+                               PCIE_FORCE_RX_SDCAL_EN);
-+      usleep_range(150, 200);
-+      airoha_phy_pma0_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
-+                                 PCIE_FORCE_RX_SDCAL_EN);
-+}
-+
-+static void
-+airoha_pcie_phy_set_rxlan1_signal_detect(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
-+                                 CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON);
-+
-+      usleep_range(100, 200);
-+
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
-+                                   PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
-+                                     CSR_2L_PXP_RX1_SIGDET_PEAK, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
-+                                     CSR_2L_PXP_RX1_SIGDET_VTH_SEL, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
-+                                     CSR_2L_PXP_VOS_PNINV, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_DAC_RANGE_EYE,
-+                                     CSR_2L_PXP_RX1_SIGDET_LPF_CTRL, 0x1);
-+
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
-+                                   PCIE_CAL_OUT_OS, 0x0);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
-+                                 CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB);
-+
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
-+                               PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
-+      airoha_phy_pma1_update_field(pcie_phy,
-+                                   REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
-+                                   PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
-+                                   PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
-+                                   PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
-+      airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
-+                                 PCIE_DISB_RX_SDCAL_EN);
-+
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
-+                               PCIE_FORCE_RX_SDCAL_EN);
-+      usleep_range(150, 200);
-+      airoha_phy_pma1_clear_bits(pcie_phy,
-+                                 REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
-+                                 PCIE_FORCE_RX_SDCAL_EN);
-+}
-+
-+static void airoha_pcie_phy_set_rxflow(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
-+                               PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
-+                               PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
-+                               PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
-+      airoha_phy_pma0_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
-+                               PCIE_FORCE_DA_PXP_RX_FE_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
-+                               PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
-+      airoha_phy_pma1_set_bits(pcie_phy,
-+                               REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
-+                               PCIE_FORCE_DA_PXP_RX_FE_PWDB |
-+                               PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
-+                                 CSR_2L_PXP_RX0_PHYCK_RSTB |
-+                                 CSR_2L_PXP_RX0_TDC_CK_SEL);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
-+                                 CSR_2L_PXP_RX1_PHYCK_RSTB |
-+                                 CSR_2L_PXP_RX1_TDC_CK_SEL);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                               PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
-+                               PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
-+                               PCIE_SW_TX_FIFO_RST);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                               PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
-+                               PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
-+                               PCIE_SW_TX_FIFO_RST);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
-+                                 CSR_2L_PXP_RX0_FE_VB_EQ2_EN |
-+                                 CSR_2L_PXP_RX0_FE_VB_EQ3_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
-+                                 CSR_2L_PXP_RX0_FE_VB_EQ1_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
-+                                 CSR_2L_PXP_RX1_FE_VB_EQ1_EN |
-+                                 CSR_2L_PXP_RX1_FE_VB_EQ2_EN |
-+                                 CSR_2L_PXP_RX1_FE_VB_EQ3_EN);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
-+                                     CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
-+                                     CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
-+                                     CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
-+                                     CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
-+}
-+
-+static void airoha_pcie_phy_set_pr(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
-+                                     CSR_2L_PXP_CDR0_PR_VREG_IBAND, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
-+                                     CSR_2L_PXP_CDR0_PR_VREG_CKBUF, 0x5);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_CKREF_DIV,
-+                                   CSR_2L_PXP_CDR0_PR_CKREF_DIV);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
-+                                   CSR_2L_PXP_CDR0_PR_CKREF_DIV1);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
-+                                     CSR_2L_PXP_CDR1_PR_VREG_IBAND, 0x5);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
-+                                     CSR_2L_PXP_CDR1_PR_VREG_CKBUF, 0x5);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_CKREF_DIV,
-+                                   CSR_2L_PXP_CDR1_PR_CKREF_DIV);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
-+                                   CSR_2L_PXP_CDR1_PR_CKREF_DIV1);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_LPF_RATIO,
-+                                     CSR_2L_PXP_CDR0_LPF_TOP_LIM, 0x20000);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_LPF_RATIO,
-+                                     CSR_2L_PXP_CDR1_LPF_TOP_LIM, 0x20000);
-+
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
-+                                     CSR_2L_PXP_CDR0_PR_BETA_SEL, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
-+                                     CSR_2L_PXP_CDR1_PR_BETA_SEL, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
-+                                     CSR_2L_PXP_CDR0_PR_KBAND_DIV, 0x4);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
-+                                     CSR_2L_PXP_CDR1_PR_KBAND_DIV, 0x4);
-+}
-+
-+static void airoha_pcie_phy_set_txflow(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
-+                                 CSR_2L_PXP_TX0_CKLDO_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
-+                                 CSR_2L_PXP_TX1_CKLDO_EN);
-+
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
-+                                 CSR_2L_PXP_TX0_DMEDGEGEN_EN);
-+      airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
-+                                 CSR_2L_PXP_TX1_DMEDGEGEN_EN);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TX1_MULTLANE,
-+                                   CSR_2L_PXP_TX1_MULTLANE_EN);
-+}
-+
-+static void airoha_pcie_phy_set_rx_mode(struct airoha_pcie_phy *pcie_phy)
-+{
-+      writel(0x804000, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_27);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
-+                               0x77700);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
-+                                   CSR_2L_PXP_CDR0_PR_MONCK_ENABLE);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
-+                                     CSR_2L_PXP_CDR0_PR_RESERVE0, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS,
-+                                     CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS, 0x19);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
-+                                     CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS, 0x19);
-+      airoha_phy_csr_2l_update_field(pcie_phy,
-+                                     REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
-+                                     CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS, 0x14);
-+
-+      writel(0x804000, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_27);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
-+                                   PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
-+
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
-+                               0x77700);
-+
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
-+                                   CSR_2L_PXP_CDR1_PR_MONCK_ENABLE);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
-+                                     CSR_2L_PXP_CDR1_PR_RESERVE0, 0x2);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
-+                                     CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS, 0x19);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
-+                                     CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS, 0x19);
-+      airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
-+                                     CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS, 0x14);
-+}
-+
-+static void airoha_pcie_phy_load_kflow(struct airoha_pcie_phy *pcie_phy)
-+{
-+      airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
-+                                   PCIE_FORCE_PMA_RX_SPEED, 0xa);
-+      airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
-+                                   PCIE_FORCE_PMA_RX_SPEED, 0xa);
-+      airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
-+      airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
-+
-+      airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
-+                                 PCIE_FORCE_PMA_RX_SPEED);
-+      airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
-+                                 PCIE_FORCE_PMA_RX_SPEED);
-+      usleep_range(100, 200);
-+
-+      airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
-+      airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
-+}
-+
-+/**
-+ * airoha_pcie_phy_init() - Initialize the phy
-+ * @phy: the phy to be initialized
-+ *
-+ * Initialize the phy registers.
-+ * The hardware settings are reset during suspend, so the PHY must be
-+ * reinitialized when the consumer calls phy_init() again on resume.
-+ */
-+static int airoha_pcie_phy_init(struct phy *phy)
-+{
-+      struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
-+
-+      /* enable load FLL-K flow */
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
-+                               PCIE_FLL_LOAD_EN);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
-+                               PCIE_FLL_LOAD_EN);
-+
-+      airoha_pcie_phy_init_default(pcie_phy);
-+      airoha_pcie_phy_init_clk_out(pcie_phy);
-+      airoha_pcie_phy_init_csr_2l(pcie_phy);
-+
-+      usleep_range(100, 200);
-+
-+      airoha_pcie_phy_init_rx(pcie_phy);
-+      /* phase 1, no ssc for K TXPLL */
-+      airoha_pcie_phy_init_jcpll(pcie_phy);
-+
-+      usleep_range(500, 600);
-+
-+      /* TX PLL settings */
-+      airoha_pcie_phy_txpll(pcie_phy);
-+
-+      usleep_range(200, 300);
-+
-+      /* SSC JCPLL setting */
-+      airoha_pcie_phy_init_ssc_jcpll(pcie_phy);
-+
-+      usleep_range(100, 200);
-+
-+      /* Rx lane0 signal detect */
-+      airoha_pcie_phy_set_rxlan0_signal_detect(pcie_phy);
-+      /* Rx lane1 signal detect */
-+      airoha_pcie_phy_set_rxlan1_signal_detect(pcie_phy);
-+      /* RX FLOW */
-+      airoha_pcie_phy_set_rxflow(pcie_phy);
-+
-+      usleep_range(100, 200);
-+
-+      airoha_pcie_phy_set_pr(pcie_phy);
-+      /* TX FLOW */
-+      airoha_pcie_phy_set_txflow(pcie_phy);
-+
-+      usleep_range(100, 200);
-+      /* RX mode setting */
-+      airoha_pcie_phy_set_rx_mode(pcie_phy);
-+      /* Load K-Flow */
-+      airoha_pcie_phy_load_kflow(pcie_phy);
-+      airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
-+                                 PCIE_DA_XPON_CDR_PR_PWDB);
-+      airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
-+                                 PCIE_DA_XPON_CDR_PR_PWDB);
-+
-+      usleep_range(100, 200);
-+
-+      airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
-+                               PCIE_DA_XPON_CDR_PR_PWDB);
-+      airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
-+                               PCIE_DA_XPON_CDR_PR_PWDB);
-+
-+      usleep_range(100, 200);
-+
-+      return 0;
-+}
-+
-+static int airoha_pcie_phy_exit(struct phy *phy)
-+{
-+      struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
-+
-+      airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                                 PCIE_PMA_SW_RST);
-+      airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
-+                                 PCIE_PMA_SW_RST);
-+      airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
-+                                   CSR_2L_PXP_JCPLL_SSC_PHASE_INI |
-+                                   CSR_2L_PXP_JCPLL_SSC_TRI_EN |
-+                                   CSR_2L_PXP_JCPLL_SSC_EN);
-+
-+      return 0;
-+}
-+
-+static const struct phy_ops airoha_pcie_phy_ops = {
-+      .init = airoha_pcie_phy_init,
-+      .exit = airoha_pcie_phy_exit,
-+      .owner = THIS_MODULE,
-+};
-+
-+static int airoha_pcie_phy_probe(struct platform_device *pdev)
-+{
-+      struct airoha_pcie_phy *pcie_phy;
-+      struct device *dev = &pdev->dev;
-+      struct phy_provider *provider;
-+
-+      pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
-+      if (!pcie_phy)
-+              return -ENOMEM;
-+
-+      pcie_phy->csr_2l = devm_platform_ioremap_resource_byname(pdev, "csr-2l");
-+      if (IS_ERR(pcie_phy->csr_2l))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->csr_2l),
-+                                   "Failed to map phy-csr-2l base\n");
-+
-+      pcie_phy->pma0 = devm_platform_ioremap_resource_byname(pdev, "pma0");
-+      if (IS_ERR(pcie_phy->pma0))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->pma0),
-+                                   "Failed to map phy-pma0 base\n");
-+
-+      pcie_phy->pma1 = devm_platform_ioremap_resource_byname(pdev, "pma1");
-+      if (IS_ERR(pcie_phy->pma1))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->pma1),
-+                                   "Failed to map phy-pma1 base\n");
-+
-+      pcie_phy->phy = devm_phy_create(dev, dev->of_node, &airoha_pcie_phy_ops);
-+      if (IS_ERR(pcie_phy->phy))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
-+                                   "Failed to create PCIe phy\n");
-+
-+      pcie_phy->dev = dev;
-+      phy_set_drvdata(pcie_phy->phy, pcie_phy);
-+
-+      provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-+      if (IS_ERR(provider))
-+              return dev_err_probe(dev, PTR_ERR(provider),
-+                                   "PCIe phy probe failed\n");
-+
-+      return 0;
-+}
-+
-+static const struct of_device_id airoha_pcie_phy_of_match[] = {
-+      { .compatible = "airoha,en7581-pcie-phy" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, airoha_pcie_phy_of_match);
-+
-+static struct platform_driver airoha_pcie_phy_driver = {
-+      .probe  = airoha_pcie_phy_probe,
-+      .driver = {
-+              .name = "airoha-pcie-phy",
-+              .of_match_table = airoha_pcie_phy_of_match,
-+      },
-+};
-+module_platform_driver(airoha_pcie_phy_driver);
-+
-+MODULE_DESCRIPTION("Airoha PCIe PHY driver");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_LICENSE("GPL");
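For reference, the phy_ops registered above are driven through the generic PHY framework: a PCIe controller driver obtains the PHY from its device-tree node and calls phy_init()/phy_power_on() before bringing up the link, which is when the init sequence above runs. A minimal consumer-side sketch, assuming a "pcie-phy" consumer name and a helper function that are illustrative only and not defined by these patches:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

/* Illustrative consumer-side bring-up; "pcie-phy" is an assumed
 * phandle name, not taken from these patches.
 */
static int example_pcie_phy_bringup(struct device *dev)
{
        struct phy *phy;
        int err;

        phy = devm_phy_get(dev, "pcie-phy");
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        err = phy_init(phy);            /* ends up in airoha_pcie_phy_init() */
        if (err)
                return err;

        err = phy_power_on(phy);
        if (err)
                phy_exit(phy);

        return err;
}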
diff --git a/target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch b/target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch
deleted file mode 100644 (file)
index 51be766..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-From 2a011c3c12e8de461fb1fdce85fa38d308c4eb8b Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 29 Jun 2024 19:51:49 +0200
-Subject: [PATCH] phy: airoha: Add dtime and Rx AEQ IO registers
-
-Introduce Tx-Rx detection time and Rx AEQ training register mappings to
-the phy-airoha-pcie driver. This is a preliminary patch to introduce PCIe
-support to the EN7581 SoC through the mediatek-gen3 PCIe driver.
-This change does not introduce any backward compatibility issue since
-the EN7581 dts is not upstream yet.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Link: https://lore.kernel.org/r/edf3b28926177166c65256604d69f2f576cb6fb3.1719682943.git.lorenzo@kernel.org
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- drivers/phy/phy-airoha-pcie-regs.h | 17 +++++++++++++
- drivers/phy/phy-airoha-pcie.c      | 38 ++++++++++++++++++++++++++++++
- 2 files changed, 55 insertions(+)
-
---- a/drivers/phy/phy-airoha-pcie-regs.h
-+++ b/drivers/phy/phy-airoha-pcie-regs.h
-@@ -474,4 +474,21 @@
- #define REG_PCIE_PMA_DIG_RESERVE_27           0x0908
- #define REG_PCIE_PMA_DIG_RESERVE_30           0x0914
-+/* DTIME */
-+#define REG_PCIE_PEXTP_DIG_GLB44              0x00
-+#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL      GENMASK(7, 0)
-+#define PCIE_XTP_RXDET_EN_STB_T_SEL           GENMASK(15, 8)
-+#define PCIE_XTP_RXDET_FINISH_STB_T_SEL               GENMASK(23, 16)
-+#define PCIE_XTP_TXPD_TX_DATA_EN_DLY          GENMASK(27, 24)
-+#define PCIE_XTP_TXPD_RXDET_DONE_CDT          BIT(28)
-+#define PCIE_XTP_RXDET_LATCH_STB_T_SEL                GENMASK(31, 29)
-+
-+/* RX AEQ */
-+#define REG_PCIE_PEXTP_DIG_LN_RX30_P0         0x0000
-+#define PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT   GENMASK(7, 0)
-+#define PCIE_XTP_LN_RX_PDOWN_T2RLB_DIG_EN     BIT(8)
-+#define PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT    GENMASK(31, 16)
-+
-+#define REG_PCIE_PEXTP_DIG_LN_RX30_P1         0x0100
-+
- #endif /* _PHY_AIROHA_PCIE_H */
---- a/drivers/phy/phy-airoha-pcie.c
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -31,6 +31,9 @@ enum airoha_pcie_port_gen {
-  * @csr_2l: Analogic lane IO mapped register base address
-  * @pma0: IO mapped register base address of PMA0-PCIe
-  * @pma1: IO mapped register base address of PMA1-PCIe
-+ * @p0_xr_dtime: IO mapped register base address of port0 Tx-Rx detection time
-+ * @p1_xr_dtime: IO mapped register base address of port1 Tx-Rx detection time
-+ * @rx_aeq: IO mapped register base address of Rx AEQ training
-  */
- struct airoha_pcie_phy {
-       struct device *dev;
-@@ -38,6 +41,9 @@ struct airoha_pcie_phy {
-       void __iomem *csr_2l;
-       void __iomem *pma0;
-       void __iomem *pma1;
-+      void __iomem *p0_xr_dtime;
-+      void __iomem *p1_xr_dtime;
-+      void __iomem *rx_aeq;
- };
- static void airoha_phy_clear_bits(void __iomem *reg, u32 mask)
-@@ -1101,6 +1107,21 @@ static void airoha_pcie_phy_load_kflow(s
- static int airoha_pcie_phy_init(struct phy *phy)
- {
-       struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
-+      u32 val;
-+
-+      /* Setup Tx-Rx detection time */
-+      val = FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) |
-+            FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) |
-+            FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2) |
-+            FIELD_PREP(PCIE_XTP_TXPD_TX_DATA_EN_DLY, 0x3) |
-+            FIELD_PREP(PCIE_XTP_RXDET_LATCH_STB_T_SEL, 0x1);
-+      writel(val, pcie_phy->p0_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
-+      writel(val, pcie_phy->p1_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
-+      /* Setup Rx AEQ training time */
-+      val = FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT, 0x32) |
-+            FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT, 0x5050);
-+      writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P0);
-+      writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P1);
-       /* enable load FLL-K flow */
-       airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
-@@ -1217,6 +1238,23 @@ static int airoha_pcie_phy_probe(struct
-               return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
-                                    "Failed to create PCIe phy\n");
-+      pcie_phy->p0_xr_dtime =
-+              devm_platform_ioremap_resource_byname(pdev, "p0-xr-dtime");
-+      if (IS_ERR(pcie_phy->p0_xr_dtime))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->p0_xr_dtime),
-+                                   "Failed to map P0 Tx-Rx dtime base\n");
-+
-+      pcie_phy->p1_xr_dtime =
-+              devm_platform_ioremap_resource_byname(pdev, "p1-xr-dtime");
-+      if (IS_ERR(pcie_phy->p1_xr_dtime))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->p1_xr_dtime),
-+                                   "Failed to map P1 Tx-Rx dtime base\n");
-+
-+      pcie_phy->rx_aeq = devm_platform_ioremap_resource_byname(pdev, "rx-aeq");
-+      if (IS_ERR(pcie_phy->rx_aeq))
-+              return dev_err_probe(dev, PTR_ERR(pcie_phy->rx_aeq),
-+                                   "Failed to map Rx AEQ base\n");
-+
-       pcie_phy->dev = dev;
-       phy_set_drvdata(pcie_phy->phy, pcie_phy);
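The GLB44 and LN_RX30 values above are built by packing several sub-fields into one 32-bit word with FIELD_PREP() against the GENMASK() definitions the patch adds. A stand-alone sketch of that packing, reusing the PCIE_XTP_RXDET_* fields and the 0x33/0x1/0x2 values written in airoha_pcie_phy_init():

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Field layout as defined in phy-airoha-pcie-regs.h by this patch. */
#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL        GENMASK(7, 0)
#define PCIE_XTP_RXDET_EN_STB_T_SEL             GENMASK(15, 8)
#define PCIE_XTP_RXDET_FINISH_STB_T_SEL         GENMASK(23, 16)

static u32 example_pack_rxdet_timing(void)
{
        /* FIELD_PREP() shifts each value into its GENMASK() slot, so
         * the three fields combine into 0x00020133 in this example.
         */
        return FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) |
               FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) |
               FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2);
}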
diff --git a/target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch b/target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch
deleted file mode 100644 (file)
index ff31b23..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 7f7315db3d262298ab33d198d3f0b09cabfa7b6b Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 6 Aug 2024 17:55:48 +0200
-Subject: [PATCH] phy: airoha: adjust initialization delay in
- airoha_pcie_phy_init()
-
-Align the phy-pcie initialization delay in the airoha_pcie_phy_init
-routine with the vendor SDK and allow the hw to complete the required
-configuration before proceeding.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/8af6f27857619f1e0dd227f08b8584ae8fb22fb2.1722959625.git.lorenzo@kernel.org
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- drivers/phy/phy-airoha-pcie.c | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
---- a/drivers/phy/phy-airoha-pcie.c
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -18,6 +18,9 @@
- #define LEQ_LEN_CTRL_MAX_VAL  7
- #define FREQ_LOCK_MAX_ATTEMPT 10
-+/* PCIe-PHY initialization time in ms needed by the hw to complete */
-+#define PHY_HW_INIT_TIME_MS   30
-+
- enum airoha_pcie_port_gen {
-       PCIE_PORT_GEN1 = 1,
-       PCIE_PORT_GEN2,
-@@ -1181,7 +1184,8 @@ static int airoha_pcie_phy_init(struct p
-       airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
-                                PCIE_DA_XPON_CDR_PR_PWDB);
--      usleep_range(100, 200);
-+      /* Wait for the PCIe PHY to complete initialization before returning */
-+      msleep(PHY_HW_INIT_TIME_MS);
-       return 0;
- }
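The change above trades the final usleep_range(100, 200) for msleep(PHY_HW_INIT_TIME_MS) at the end of airoha_pcie_phy_init(). This matches the usual kernel guidance (Documentation/timers/timers-howto.rst): usleep_range() for waits in roughly the 10us-20ms range, msleep() for longer settle times such as the 30ms used here. A small illustrative sketch of the two styles:

#include <linux/delay.h>

/* Illustration of the two delay styles used in this driver:
 * usleep_range() for short inter-step settles, msleep() for the
 * long post-init settle the hardware needs (30 ms per the patch).
 */
static void example_phy_settle(void)
{
        usleep_range(100, 200);         /* short settle between register writes */
        msleep(30);                     /* long settle after the full init sequence */
}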
diff --git a/target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch b/target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch
deleted file mode 100644 (file)
index 271ef01..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-From ca9afde0563a80200eab856a53d7eab28c8fdd90 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 18 Sep 2024 15:32:52 +0200
-Subject: [PATCH 1/4] phy: airoha: Fix REG_CSR_2L_PLL_CMN_RESERVE0 config in
- airoha_pcie_phy_init_clk_out()
-
-Fix typo configuring REG_CSR_2L_PLL_CMN_RESERVE0 register in
-airoha_pcie_phy_init_clk_out routine.
-
-Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
----
- drivers/phy/phy-airoha-pcie.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/phy/phy-airoha-pcie.c
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -459,7 +459,7 @@ static void airoha_pcie_phy_init_clk_out
-       airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
-                                    CSR_2L_PXP_CLKTX1_SR);
-       airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
--                                     CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
-+                                     CSR_2L_PXP_PLL_RESERVE_MASK, 0xd0d);
- }
- static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
diff --git a/target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch b/target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch
deleted file mode 100644 (file)
index 5c90959..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-From 2c2313c84ad7c0e5e39fbd98559d40f6b9ec1f83 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 18 Sep 2024 15:32:53 +0200
-Subject: [PATCH 2/4] phy: airoha: Fix REG_PCIE_PMA_TX_RESET config in
- airoha_pcie_phy_init_csr_2l()
-
-Fix typos when configuring the REG_PCIE_PMA_TX_RESET register in the
-airoha_pcie_phy_init_csr_2l routine for lane0 and lane1.
-
-Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
----
- drivers/phy/phy-airoha-pcie.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/phy/phy-airoha-pcie.c
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -471,9 +471,9 @@ static void airoha_pcie_phy_init_csr_2l(
-                                PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
-                                PCIE_SW_RX_RST);
-       airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
--                               PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
-+                               PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
-       airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
--                               PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
-+                               PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
- }
- static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
diff --git a/target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch b/target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch
deleted file mode 100644 (file)
index 8cde5f1..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-From 6e0c349a8a59959c3d3571b5f6776bc2d2ca62bc Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 18 Sep 2024 15:32:54 +0200
-Subject: [PATCH 3/4] phy: airoha: Fix REG_CSR_2L_JCPLL_SDM_HREN config in
- airoha_pcie_phy_init_ssc_jcpll()
-
-Fix typo configuring REG_CSR_2L_JCPLL_SDM_HREN register in
-airoha_pcie_phy_init_ssc_jcpll routine.
-
-Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
----
- drivers/phy/phy-airoha-pcie.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/phy/phy-airoha-pcie.c
-+++ b/drivers/phy/phy-airoha-pcie.c
-@@ -802,7 +802,7 @@ static void airoha_pcie_phy_init_ssc_jcp
-       airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
-                                  CSR_2L_PXP_JCPLL_SDM_IFM);
-       airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
--                                 REG_CSR_2L_JCPLL_SDM_HREN);
-+                                 CSR_2L_PXP_JCPLL_SDM_HREN);
-       airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
-                                    CSR_2L_PXP_JCPLL_SDM_DI_EN);
-       airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
diff --git a/target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch b/target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch
deleted file mode 100644 (file)
index 163aebc..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-From bc1bb265f504ea19ce611a1aec1a40dec409cd15 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 18 Sep 2024 15:32:55 +0200
-Subject: [PATCH 4/4] phy: airoha: Fix REG_CSR_2L_RX{0,1}_REV0 definitions
-
-Fix the following register definitions for REG_CSR_2L_RX{0,1}_REV0
-registers:
-- CSR_2L_PXP_VOS_PNINV
-- CSR_2L_PXP_FE_GAIN_NORMAL_MODE
-- CSR_2L_PXP_FE_GAIN_TRAIN_MODE
-
-Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
----
- drivers/phy/phy-airoha-pcie-regs.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/drivers/phy/phy-airoha-pcie-regs.h
-+++ b/drivers/phy/phy-airoha-pcie-regs.h
-@@ -197,9 +197,9 @@
- #define CSR_2L_PXP_TX1_MULTLANE_EN            BIT(0)
- #define REG_CSR_2L_RX0_REV0                   0x00fc
--#define CSR_2L_PXP_VOS_PNINV                  GENMASK(3, 2)
--#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE                GENMASK(6, 4)
--#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE         GENMASK(10, 8)
-+#define CSR_2L_PXP_VOS_PNINV                  GENMASK(19, 18)
-+#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE                GENMASK(22, 20)
-+#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE         GENMASK(26, 24)
- #define REG_CSR_2L_RX0_PHYCK_DIV              0x0100
- #define CSR_2L_PXP_RX0_PHYCK_SEL              GENMASK(9, 8)
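The fix above moves the three field masks from bits 2..10 up to bits 18..26 of REG_CSR_2L_RX0_REV0. A short sketch of how GENMASK()/FIELD_PREP() place a value, using the corrected definitions; the field values shown are placeholders for illustration, not taken from the driver:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* Corrected layout for REG_CSR_2L_RX0_REV0 (from the patch above). */
    #define CSR_2L_PXP_VOS_PNINV			GENMASK(19, 18)
    #define CSR_2L_PXP_FE_GAIN_NORMAL_MODE		GENMASK(22, 20)

    /* FIELD_PREP() shifts the value into the mask position, so a wrong
     * GENMASK() range silently programs a different part of the register.
     * 0x2 and 0x5 below are illustrative values only.
     */
    u32 val = FIELD_PREP(CSR_2L_PXP_VOS_PNINV, 0x2) |
	      FIELD_PREP(CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x5);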
diff --git a/target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch b/target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch
deleted file mode 100644 (file)
index 417dcc0..0000000
+++ /dev/null
@@ -1,1203 +0,0 @@
-From a403997c12019d0f82a9480207bf85985b8de5e7 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 29 Apr 2024 10:13:10 +0200
-Subject: [PATCH] spi: airoha: add SPI-NAND Flash controller driver
-
-Introduce a SPI-NAND driver for the Airoha NAND Flash Interface found
-on Airoha ARM SoCs.
-
-Tested-by: Rajeev Kumar <Rajeev.Kumar@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Andy Shevchenko <andy@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Link: https://lore.kernel.org/r/6c9db20505b01a66807995374f2af475a23ce5b2.1714377864.git.lorenzo@kernel.org
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- MAINTAINERS                   |    9 +
- drivers/spi/Kconfig           |   10 +
- drivers/spi/Makefile          |    1 +
- drivers/spi/spi-airoha-snfi.c | 1129 +++++++++++++++++++++++++++++++++
- 4 files changed, 1149 insertions(+)
- create mode 100644 drivers/spi/spi-airoha-snfi.c
-
-# diff --git a/MAINTAINERS b/MAINTAINERS
-# index 2b63ed114532..dde7dd956156 100644
-# --- a/MAINTAINERS
-# +++ b/MAINTAINERS
-# @@ -653,6 +653,15 @@ S:      Supported
-#  F:  fs/aio.c
-#  F:  include/linux/*aio*.h
-# +AIROHA SPI SNFI DRIVER
-# +M:  Lorenzo Bianconi <lorenzo@kernel.org>
-# +M:  Ray Liu <ray.liu@airoha.com>
-# +L:  linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-# +L:  linux-spi@vger.kernel.org
-# +S:  Maintained
-# +F:  Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
-# +F:  drivers/spi/spi-airoha.c
-# +
-#  AIRSPY MEDIA DRIVER
-#  L:  linux-media@vger.kernel.org
-#  S:  Orphan
---- a/drivers/spi/Kconfig
-+++ b/drivers/spi/Kconfig
-@@ -57,6 +57,16 @@ config SPI_MEM
- comment "SPI Master Controller Drivers"
-+config SPI_AIROHA_SNFI
-+      tristate "Airoha SPI NAND Flash Interface"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      depends on SPI_MASTER
-+      select REGMAP_MMIO
-+      help
-+        This enables support for SPI-NAND mode on the Airoha NAND
-+        Flash Interface found on Airoha ARM SoCs. This controller
-+        is implemented as a SPI-MEM controller.
-+
- config SPI_ALTERA
-       tristate "Altera SPI Controller platform driver"
-       select SPI_ALTERA_CORE
---- a/drivers/spi/Makefile
-+++ b/drivers/spi/Makefile
-@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_SPIDEV)             += spidev.o
- obj-$(CONFIG_SPI_LOOPBACK_TEST)               += spi-loopback-test.o
- # SPI master controller drivers (bus)
-+obj-$(CONFIG_SPI_AIROHA_SNFI)         += spi-airoha-snfi.o
- obj-$(CONFIG_SPI_ALTERA)              += spi-altera-platform.o
- obj-$(CONFIG_SPI_ALTERA_CORE)         += spi-altera-core.o
- obj-$(CONFIG_SPI_ALTERA_DFL)          += spi-altera-dfl.o
---- /dev/null
-+++ b/drivers/spi/spi-airoha-snfi.c
-@@ -0,0 +1,1129 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ * Author: Ray Liu <ray.liu@airoha.com>
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/errno.h>
-+#include <linux/limits.h>
-+#include <linux/math.h>
-+#include <linux/minmax.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/platform_device.h>
-+#include <linux/property.h>
-+#include <linux/regmap.h>
-+#include <linux/sizes.h>
-+#include <linux/spi/spi.h>
-+#include <linux/spi/spi-mem.h>
-+#include <linux/types.h>
-+#include <asm/unaligned.h>
-+
-+/* SPI */
-+#define REG_SPI_CTRL_BASE                     0x1FA10000
-+
-+#define REG_SPI_CTRL_READ_MODE                        0x0000
-+#define REG_SPI_CTRL_READ_IDLE_EN             0x0004
-+#define REG_SPI_CTRL_SIDLY                    0x0008
-+#define REG_SPI_CTRL_CSHEXT                   0x000c
-+#define REG_SPI_CTRL_CSLEXT                   0x0010
-+
-+#define REG_SPI_CTRL_MTX_MODE_TOG             0x0014
-+#define SPI_CTRL_MTX_MODE_TOG                 GENMASK(3, 0)
-+
-+#define REG_SPI_CTRL_RDCTL_FSM                        0x0018
-+#define SPI_CTRL_RDCTL_FSM                    GENMASK(3, 0)
-+
-+#define REG_SPI_CTRL_MACMUX_SEL                       0x001c
-+
-+#define REG_SPI_CTRL_MANUAL_EN                        0x0020
-+#define SPI_CTRL_MANUAL_EN                    BIT(0)
-+
-+#define REG_SPI_CTRL_OPFIFO_EMPTY             0x0024
-+#define SPI_CTRL_OPFIFO_EMPTY                 BIT(0)
-+
-+#define REG_SPI_CTRL_OPFIFO_WDATA             0x0028
-+#define SPI_CTRL_OPFIFO_LEN                   GENMASK(8, 0)
-+#define SPI_CTRL_OPFIFO_OP                    GENMASK(13, 9)
-+
-+#define REG_SPI_CTRL_OPFIFO_FULL              0x002c
-+#define SPI_CTRL_OPFIFO_FULL                  BIT(0)
-+
-+#define REG_SPI_CTRL_OPFIFO_WR                        0x0030
-+#define SPI_CTRL_OPFIFO_WR                    BIT(0)
-+
-+#define REG_SPI_CTRL_DFIFO_FULL                       0x0034
-+#define SPI_CTRL_DFIFO_FULL                   BIT(0)
-+
-+#define REG_SPI_CTRL_DFIFO_WDATA              0x0038
-+#define SPI_CTRL_DFIFO_WDATA                  GENMASK(7, 0)
-+
-+#define REG_SPI_CTRL_DFIFO_EMPTY              0x003c
-+#define SPI_CTRL_DFIFO_EMPTY                  BIT(0)
-+
-+#define REG_SPI_CTRL_DFIFO_RD                 0x0040
-+#define SPI_CTRL_DFIFO_RD                     BIT(0)
-+
-+#define REG_SPI_CTRL_DFIFO_RDATA              0x0044
-+#define SPI_CTRL_DFIFO_RDATA                  GENMASK(7, 0)
-+
-+#define REG_SPI_CTRL_DUMMY                    0x0080
-+#define SPI_CTRL_CTRL_DUMMY                   GENMASK(3, 0)
-+
-+#define REG_SPI_CTRL_PROBE_SEL                        0x0088
-+#define REG_SPI_CTRL_INTERRUPT                        0x0090
-+#define REG_SPI_CTRL_INTERRUPT_EN             0x0094
-+#define REG_SPI_CTRL_SI_CK_SEL                        0x009c
-+#define REG_SPI_CTRL_SW_CFGNANDADDR_VAL               0x010c
-+#define REG_SPI_CTRL_SW_CFGNANDADDR_EN                0x0110
-+#define REG_SPI_CTRL_SFC_STRAP                        0x0114
-+
-+#define REG_SPI_CTRL_NFI2SPI_EN                       0x0130
-+#define SPI_CTRL_NFI2SPI_EN                   BIT(0)
-+
-+/* NFI2SPI */
-+#define REG_SPI_NFI_CNFG                      0x0000
-+#define SPI_NFI_DMA_MODE                      BIT(0)
-+#define SPI_NFI_READ_MODE                     BIT(1)
-+#define SPI_NFI_DMA_BURST_EN                  BIT(2)
-+#define SPI_NFI_HW_ECC_EN                     BIT(8)
-+#define SPI_NFI_AUTO_FDM_EN                   BIT(9)
-+#define SPI_NFI_OPMODE                                GENMASK(14, 12)
-+
-+#define REG_SPI_NFI_PAGEFMT                   0x0004
-+#define SPI_NFI_PAGE_SIZE                     GENMASK(1, 0)
-+#define SPI_NFI_SPARE_SIZE                    GENMASK(5, 4)
-+
-+#define REG_SPI_NFI_CON                               0x0008
-+#define SPI_NFI_FIFO_FLUSH                    BIT(0)
-+#define SPI_NFI_RST                           BIT(1)
-+#define SPI_NFI_RD_TRIG                               BIT(8)
-+#define SPI_NFI_WR_TRIG                               BIT(9)
-+#define SPI_NFI_SEC_NUM                               GENMASK(15, 12)
-+
-+#define REG_SPI_NFI_INTR_EN                   0x0010
-+#define SPI_NFI_RD_DONE_EN                    BIT(0)
-+#define SPI_NFI_WR_DONE_EN                    BIT(1)
-+#define SPI_NFI_RST_DONE_EN                   BIT(2)
-+#define SPI_NFI_ERASE_DONE_EN                 BIT(3)
-+#define SPI_NFI_BUSY_RETURN_EN                        BIT(4)
-+#define SPI_NFI_ACCESS_LOCK_EN                        BIT(5)
-+#define SPI_NFI_AHB_DONE_EN                   BIT(6)
-+#define SPI_NFI_ALL_IRQ_EN                                    \
-+      (SPI_NFI_RD_DONE_EN | SPI_NFI_WR_DONE_EN |              \
-+       SPI_NFI_RST_DONE_EN | SPI_NFI_ERASE_DONE_EN |          \
-+       SPI_NFI_BUSY_RETURN_EN | SPI_NFI_ACCESS_LOCK_EN |      \
-+       SPI_NFI_AHB_DONE_EN)
-+
-+#define REG_SPI_NFI_INTR                      0x0014
-+#define SPI_NFI_AHB_DONE                      BIT(6)
-+
-+#define REG_SPI_NFI_CMD                               0x0020
-+
-+#define REG_SPI_NFI_ADDR_NOB                  0x0030
-+#define SPI_NFI_ROW_ADDR_NOB                  GENMASK(6, 4)
-+
-+#define REG_SPI_NFI_STA                               0x0060
-+#define REG_SPI_NFI_FIFOSTA                   0x0064
-+#define REG_SPI_NFI_STRADDR                   0x0080
-+#define REG_SPI_NFI_FDM0L                     0x00a0
-+#define REG_SPI_NFI_FDM0M                     0x00a4
-+#define REG_SPI_NFI_FDM7L                     0x00d8
-+#define REG_SPI_NFI_FDM7M                     0x00dc
-+#define REG_SPI_NFI_FIFODATA0                 0x0190
-+#define REG_SPI_NFI_FIFODATA1                 0x0194
-+#define REG_SPI_NFI_FIFODATA2                 0x0198
-+#define REG_SPI_NFI_FIFODATA3                 0x019c
-+#define REG_SPI_NFI_MASTERSTA                 0x0224
-+
-+#define REG_SPI_NFI_SECCUS_SIZE                       0x022c
-+#define SPI_NFI_CUS_SEC_SIZE                  GENMASK(12, 0)
-+#define SPI_NFI_CUS_SEC_SIZE_EN                       BIT(16)
-+
-+#define REG_SPI_NFI_RD_CTL2                   0x0510
-+#define REG_SPI_NFI_RD_CTL3                   0x0514
-+
-+#define REG_SPI_NFI_PG_CTL1                   0x0524
-+#define SPI_NFI_PG_LOAD_CMD                   GENMASK(15, 8)
-+
-+#define REG_SPI_NFI_PG_CTL2                   0x0528
-+#define REG_SPI_NFI_NOR_PROG_ADDR             0x052c
-+#define REG_SPI_NFI_NOR_RD_ADDR                       0x0534
-+
-+#define REG_SPI_NFI_SNF_MISC_CTL              0x0538
-+#define SPI_NFI_DATA_READ_WR_MODE             GENMASK(18, 16)
-+
-+#define REG_SPI_NFI_SNF_MISC_CTL2             0x053c
-+#define SPI_NFI_READ_DATA_BYTE_NUM            GENMASK(12, 0)
-+#define SPI_NFI_PROG_LOAD_BYTE_NUM            GENMASK(28, 16)
-+
-+#define REG_SPI_NFI_SNF_STA_CTL1              0x0550
-+#define SPI_NFI_READ_FROM_CACHE_DONE          BIT(25)
-+#define SPI_NFI_LOAD_TO_CACHE_DONE            BIT(26)
-+
-+#define REG_SPI_NFI_SNF_STA_CTL2              0x0554
-+
-+#define REG_SPI_NFI_SNF_NFI_CNFG              0x055c
-+#define SPI_NFI_SPI_MODE                      BIT(0)
-+
-+/* SPI NAND Protocol OP */
-+#define SPI_NAND_OP_GET_FEATURE                       0x0f
-+#define SPI_NAND_OP_SET_FEATURE                       0x1f
-+#define SPI_NAND_OP_PAGE_READ                 0x13
-+#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE    0x03
-+#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE_FAST       0x0b
-+#define SPI_NAND_OP_READ_FROM_CACHE_DUAL      0x3b
-+#define SPI_NAND_OP_READ_FROM_CACHE_QUAD      0x6b
-+#define SPI_NAND_OP_WRITE_ENABLE              0x06
-+#define SPI_NAND_OP_WRITE_DISABLE             0x04
-+#define SPI_NAND_OP_PROGRAM_LOAD_SINGLE               0x02
-+#define SPI_NAND_OP_PROGRAM_LOAD_QUAD         0x32
-+#define SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE        0x84
-+#define SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD  0x34
-+#define SPI_NAND_OP_PROGRAM_EXECUTE           0x10
-+#define SPI_NAND_OP_READ_ID                   0x9f
-+#define SPI_NAND_OP_BLOCK_ERASE                       0xd8
-+#define SPI_NAND_OP_RESET                     0xff
-+#define SPI_NAND_OP_DIE_SELECT                        0xc2
-+
-+#define SPI_NAND_CACHE_SIZE                   (SZ_4K + SZ_256)
-+#define SPI_MAX_TRANSFER_SIZE                 511
-+
-+enum airoha_snand_mode {
-+      SPI_MODE_AUTO,
-+      SPI_MODE_MANUAL,
-+      SPI_MODE_DMA,
-+};
-+
-+enum airoha_snand_cs {
-+      SPI_CHIP_SEL_HIGH,
-+      SPI_CHIP_SEL_LOW,
-+};
-+
-+struct airoha_snand_dev {
-+      size_t buf_len;
-+
-+      u8 *txrx_buf;
-+      dma_addr_t dma_addr;
-+
-+      u64 cur_page_num;
-+      bool data_need_update;
-+};
-+
-+struct airoha_snand_ctrl {
-+      struct device *dev;
-+      struct regmap *regmap_ctrl;
-+      struct regmap *regmap_nfi;
-+      struct clk *spi_clk;
-+
-+      struct {
-+              size_t page_size;
-+              size_t sec_size;
-+              u8 sec_num;
-+              u8 spare_size;
-+      } nfi_cfg;
-+};
-+
-+static int airoha_snand_set_fifo_op(struct airoha_snand_ctrl *as_ctrl,
-+                                  u8 op_cmd, int op_len)
-+{
-+      int err;
-+      u32 val;
-+
-+      err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WDATA,
-+                         FIELD_PREP(SPI_CTRL_OPFIFO_LEN, op_len) |
-+                         FIELD_PREP(SPI_CTRL_OPFIFO_OP, op_cmd));
-+      if (err)
-+              return err;
-+
-+      err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                     REG_SPI_CTRL_OPFIFO_FULL,
-+                                     val, !(val & SPI_CTRL_OPFIFO_FULL),
-+                                     0, 250 * USEC_PER_MSEC);
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WR,
-+                         SPI_CTRL_OPFIFO_WR);
-+      if (err)
-+              return err;
-+
-+      return regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                      REG_SPI_CTRL_OPFIFO_EMPTY,
-+                                      val, (val & SPI_CTRL_OPFIFO_EMPTY),
-+                                      0, 250 * USEC_PER_MSEC);
-+}
-+
-+static int airoha_snand_set_cs(struct airoha_snand_ctrl *as_ctrl, u8 cs)
-+{
-+      return airoha_snand_set_fifo_op(as_ctrl, cs, sizeof(cs));
-+}
-+
-+static int airoha_snand_write_data_to_fifo(struct airoha_snand_ctrl *as_ctrl,
-+                                         const u8 *data, int len)
-+{
-+      int i;
-+
-+      for (i = 0; i < len; i++) {
-+              int err;
-+              u32 val;
-+
-+              /* 1. Wait until dfifo is not full */
-+              err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                             REG_SPI_CTRL_DFIFO_FULL, val,
-+                                             !(val & SPI_CTRL_DFIFO_FULL),
-+                                             0, 250 * USEC_PER_MSEC);
-+              if (err)
-+                      return err;
-+
-+              /* 2. Write data to register DFIFO_WDATA */
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_DFIFO_WDATA,
-+                                 FIELD_PREP(SPI_CTRL_DFIFO_WDATA, data[i]));
-+              if (err)
-+                      return err;
-+
-+              /* 3. Wait until dfifo is not full */
-+              err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                             REG_SPI_CTRL_DFIFO_FULL, val,
-+                                             !(val & SPI_CTRL_DFIFO_FULL),
-+                                             0, 250 * USEC_PER_MSEC);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_snand_read_data_from_fifo(struct airoha_snand_ctrl *as_ctrl,
-+                                          u8 *ptr, int len)
-+{
-+      int i;
-+
-+      for (i = 0; i < len; i++) {
-+              int err;
-+              u32 val;
-+
-+              /* 1. wait until dfifo is not empty */
-+              err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                             REG_SPI_CTRL_DFIFO_EMPTY, val,
-+                                             !(val & SPI_CTRL_DFIFO_EMPTY),
-+                                             0, 250 * USEC_PER_MSEC);
-+              if (err)
-+                      return err;
-+
-+              /* 2. read from dfifo to register DFIFO_RDATA */
-+              err = regmap_read(as_ctrl->regmap_ctrl,
-+                                REG_SPI_CTRL_DFIFO_RDATA, &val);
-+              if (err)
-+                      return err;
-+
-+              ptr[i] = FIELD_GET(SPI_CTRL_DFIFO_RDATA, val);
-+              /* 3. enable register DFIFO_RD to read next byte */
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_DFIFO_RD, SPI_CTRL_DFIFO_RD);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl,
-+                               enum airoha_snand_mode mode)
-+{
-+      int err;
-+
-+      switch (mode) {
-+      case SPI_MODE_MANUAL: {
-+              u32 val;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_NFI2SPI_EN, 0);
-+              if (err)
-+                      return err;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_READ_IDLE_EN, 0);
-+              if (err)
-+                      return err;
-+
-+              err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
-+                                             REG_SPI_CTRL_RDCTL_FSM, val,
-+                                             !(val & SPI_CTRL_RDCTL_FSM),
-+                                             0, 250 * USEC_PER_MSEC);
-+              if (err)
-+                      return err;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_MTX_MODE_TOG, 9);
-+              if (err)
-+                      return err;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_MANUAL_EN, SPI_CTRL_MANUAL_EN);
-+              if (err)
-+                      return err;
-+              break;
-+      }
-+      case SPI_MODE_DMA:
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_NFI2SPI_EN,
-+                                 SPI_CTRL_MANUAL_EN);
-+              if (err < 0)
-+                      return err;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_MTX_MODE_TOG, 0x0);
-+              if (err < 0)
-+                      return err;
-+
-+              err = regmap_write(as_ctrl->regmap_ctrl,
-+                                 REG_SPI_CTRL_MANUAL_EN, 0x0);
-+              if (err < 0)
-+                      return err;
-+              break;
-+      case SPI_MODE_AUTO:
-+      default:
-+              break;
-+      }
-+
-+      return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
-+}
-+
-+static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
-+                                 const u8 *data, int len)
-+{
-+      int i, data_len;
-+
-+      for (i = 0; i < len; i += data_len) {
-+              int err;
-+
-+              data_len = min(len, SPI_MAX_TRANSFER_SIZE);
-+              err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
-+              if (err)
-+                      return err;
-+
-+              err = airoha_snand_write_data_to_fifo(as_ctrl, &data[i],
-+                                                    data_len);
-+              if (err < 0)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
-+                                int len)
-+{
-+      int i, data_len;
-+
-+      for (i = 0; i < len; i += data_len) {
-+              int err;
-+
-+              data_len = min(len, SPI_MAX_TRANSFER_SIZE);
-+              err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
-+              if (err)
-+                      return err;
-+
-+              err = airoha_snand_read_data_from_fifo(as_ctrl, &data[i],
-+                                                     data_len);
-+              if (err < 0)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_snand_nfi_init(struct airoha_snand_ctrl *as_ctrl)
-+{
-+      int err;
-+
-+      /* switch to SNFI mode */
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_NFI_CNFG,
-+                         SPI_NFI_SPI_MODE);
-+      if (err)
-+              return err;
-+
-+      /* Enable DMA */
-+      return regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR_EN,
-+                                SPI_NFI_ALL_IRQ_EN, SPI_NFI_AHB_DONE_EN);
-+}
-+
-+static int airoha_snand_nfi_config(struct airoha_snand_ctrl *as_ctrl)
-+{
-+      int err;
-+      u32 val;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                         SPI_NFI_FIFO_FLUSH | SPI_NFI_RST);
-+      if (err)
-+              return err;
-+
-+      /* auto FDM */
-+      err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                              SPI_NFI_AUTO_FDM_EN);
-+      if (err)
-+              return err;
-+
-+      /* HW ECC */
-+      err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                              SPI_NFI_HW_ECC_EN);
-+      if (err)
-+              return err;
-+
-+      /* DMA Burst */
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                            SPI_NFI_DMA_BURST_EN);
-+      if (err)
-+              return err;
-+
-+      /* page format */
-+      switch (as_ctrl->nfi_cfg.spare_size) {
-+      case 26:
-+              val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x1);
-+              break;
-+      case 27:
-+              val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x2);
-+              break;
-+      case 28:
-+              val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x3);
-+              break;
-+      default:
-+              val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x0);
-+              break;
-+      }
-+
-+      err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT,
-+                               SPI_NFI_SPARE_SIZE, val);
-+      if (err)
-+              return err;
-+
-+      switch (as_ctrl->nfi_cfg.page_size) {
-+      case 2048:
-+              val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x1);
-+              break;
-+      case 4096:
-+              val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x2);
-+              break;
-+      default:
-+              val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x0);
-+              break;
-+      }
-+
-+      err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT,
-+                               SPI_NFI_PAGE_SIZE, val);
-+      if (err)
-+              return err;
-+
-+      /* sec num */
-+      val = FIELD_PREP(SPI_NFI_SEC_NUM, as_ctrl->nfi_cfg.sec_num);
-+      err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                               SPI_NFI_SEC_NUM, val);
-+      if (err)
-+              return err;
-+
-+      /* enable cust sec size */
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE,
-+                            SPI_NFI_CUS_SEC_SIZE_EN);
-+      if (err)
-+              return err;
-+
-+      /* set cust sec size */
-+      val = FIELD_PREP(SPI_NFI_CUS_SEC_SIZE, as_ctrl->nfi_cfg.sec_size);
-+      return regmap_update_bits(as_ctrl->regmap_nfi,
-+                                REG_SPI_NFI_SECCUS_SIZE,
-+                                SPI_NFI_CUS_SEC_SIZE, val);
-+}
-+
-+static bool airoha_snand_is_page_ops(const struct spi_mem_op *op)
-+{
-+      if (op->addr.nbytes != 2)
-+              return false;
-+
-+      if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
-+          op->addr.buswidth != 4)
-+              return false;
-+
-+      switch (op->data.dir) {
-+      case SPI_MEM_DATA_IN:
-+              if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > 0xf)
-+                      return false;
-+
-+              /* quad in / quad out */
-+              if (op->addr.buswidth == 4)
-+                      return op->data.buswidth == 4;
-+
-+              if (op->addr.buswidth == 2)
-+                      return op->data.buswidth == 2;
-+
-+              /* standard spi */
-+              return op->data.buswidth == 4 || op->data.buswidth == 2 ||
-+                     op->data.buswidth == 1;
-+      case SPI_MEM_DATA_OUT:
-+              return !op->dummy.nbytes && op->addr.buswidth == 1 &&
-+                     (op->data.buswidth == 4 || op->data.buswidth == 1);
-+      default:
-+              return false;
-+      }
-+}
-+
-+static int airoha_snand_adjust_op_size(struct spi_mem *mem,
-+                                     struct spi_mem_op *op)
-+{
-+      size_t max_len;
-+
-+      if (airoha_snand_is_page_ops(op)) {
-+              struct airoha_snand_ctrl *as_ctrl;
-+
-+              as_ctrl = spi_controller_get_devdata(mem->spi->controller);
-+              max_len = as_ctrl->nfi_cfg.sec_size;
-+              max_len += as_ctrl->nfi_cfg.spare_size;
-+              max_len *= as_ctrl->nfi_cfg.sec_num;
-+
-+              if (op->data.nbytes > max_len)
-+                      op->data.nbytes = max_len;
-+      } else {
-+              max_len = 1 + op->addr.nbytes + op->dummy.nbytes;
-+              if (max_len >= 160)
-+                      return -EOPNOTSUPP;
-+
-+              if (op->data.nbytes > 160 - max_len)
-+                      op->data.nbytes = 160 - max_len;
-+      }
-+
-+      return 0;
-+}
-+
-+static bool airoha_snand_supports_op(struct spi_mem *mem,
-+                                   const struct spi_mem_op *op)
-+{
-+      if (!spi_mem_default_supports_op(mem, op))
-+              return false;
-+
-+      if (op->cmd.buswidth != 1)
-+              return false;
-+
-+      if (airoha_snand_is_page_ops(op))
-+              return true;
-+
-+      return (!op->addr.nbytes || op->addr.buswidth == 1) &&
-+             (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
-+             (!op->data.nbytes || op->data.buswidth == 1);
-+}
-+
-+static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
-+{
-+      struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi);
-+
-+      if (!as_dev->txrx_buf)
-+              return -EINVAL;
-+
-+      if (desc->info.offset + desc->info.length > U32_MAX)
-+              return -EINVAL;
-+
-+      if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl))
-+              return -EOPNOTSUPP;
-+
-+      return 0;
-+}
-+
-+static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
-+                                      u64 offs, size_t len, void *buf)
-+{
-+      struct spi_device *spi = desc->mem->spi;
-+      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
-+      struct spi_mem_op *op = &desc->info.op_tmpl;
-+      struct airoha_snand_ctrl *as_ctrl;
-+      u32 val, rd_mode;
-+      int err;
-+
-+      if (!as_dev->data_need_update)
-+              return len;
-+
-+      as_dev->data_need_update = false;
-+
-+      switch (op->cmd.opcode) {
-+      case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
-+              rd_mode = 1;
-+              break;
-+      case SPI_NAND_OP_READ_FROM_CACHE_QUAD:
-+              rd_mode = 2;
-+              break;
-+      default:
-+              rd_mode = 0;
-+              break;
-+      }
-+
-+      as_ctrl = spi_controller_get_devdata(spi->controller);
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
-+      if (err < 0)
-+              return err;
-+
-+      err = airoha_snand_nfi_config(as_ctrl);
-+      if (err)
-+              return err;
-+
-+      dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
-+                                 as_dev->buf_len, DMA_BIDIRECTIONAL);
-+
-+      /* set dma addr */
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
-+                         as_dev->dma_addr);
-+      if (err)
-+              return err;
-+
-+      /* set cust sec size */
-+      val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num;
-+      val = FIELD_PREP(SPI_NFI_READ_DATA_BYTE_NUM, val);
-+      err = regmap_update_bits(as_ctrl->regmap_nfi,
-+                               REG_SPI_NFI_SNF_MISC_CTL2,
-+                               SPI_NFI_READ_DATA_BYTE_NUM, val);
-+      if (err)
-+              return err;
-+
-+      /* set read command */
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2,
-+                         op->cmd.opcode);
-+      if (err)
-+              return err;
-+
-+      /* set read mode */
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
-+                         FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode));
-+      if (err)
-+              return err;
-+
-+      /* set read addr */
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
-+      if (err)
-+              return err;
-+
-+      /* set nfi read */
-+      err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                               SPI_NFI_OPMODE,
-+                               FIELD_PREP(SPI_NFI_OPMODE, 6));
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                            SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE);
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0);
-+      if (err)
-+              return err;
-+
-+      /* trigger dma start read */
-+      err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                              SPI_NFI_RD_TRIG);
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                            SPI_NFI_RD_TRIG);
-+      if (err)
-+              return err;
-+
-+      err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
-+                                     REG_SPI_NFI_SNF_STA_CTL1, val,
-+                                     (val & SPI_NFI_READ_FROM_CACHE_DONE),
-+                                     0, 1 * USEC_PER_SEC);
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
-+                            SPI_NFI_READ_FROM_CACHE_DONE);
-+      if (err)
-+              return err;
-+
-+      err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
-+                                     val, (val & SPI_NFI_AHB_DONE), 0,
-+                                     1 * USEC_PER_SEC);
-+      if (err)
-+              return err;
-+
-+      /* A DMA read needs a short delay for the data to propagate from the controller to DRAM */
-+      udelay(1);
-+
-+      dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
-+                              as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-+      if (err < 0)
-+              return err;
-+
-+      memcpy(buf, as_dev->txrx_buf + offs, len);
-+
-+      return len;
-+}
-+
-+static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
-+                                       u64 offs, size_t len, const void *buf)
-+{
-+      struct spi_device *spi = desc->mem->spi;
-+      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
-+      struct spi_mem_op *op = &desc->info.op_tmpl;
-+      struct airoha_snand_ctrl *as_ctrl;
-+      u32 wr_mode, val;
-+      int err;
-+
-+      as_ctrl = spi_controller_get_devdata(spi->controller);
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-+      if (err < 0)
-+              return err;
-+
-+      dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
-+                              as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      memcpy(as_dev->txrx_buf + offs, buf, len);
-+      dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
-+                                 as_dev->buf_len, DMA_BIDIRECTIONAL);
-+
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
-+      if (err < 0)
-+              return err;
-+
-+      err = airoha_snand_nfi_config(as_ctrl);
-+      if (err)
-+              return err;
-+
-+      if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD ||
-+          op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD)
-+              wr_mode = BIT(1);
-+      else
-+              wr_mode = 0;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
-+                         as_dev->dma_addr);
-+      if (err)
-+              return err;
-+
-+      val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM,
-+                       as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num);
-+      err = regmap_update_bits(as_ctrl->regmap_nfi,
-+                               REG_SPI_NFI_SNF_MISC_CTL2,
-+                               SPI_NFI_PROG_LOAD_BYTE_NUM, val);
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1,
-+                         FIELD_PREP(SPI_NFI_PG_LOAD_CMD,
-+                                    op->cmd.opcode));
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
-+                         FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode));
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
-+      if (err)
-+              return err;
-+
-+      err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                              SPI_NFI_READ_MODE);
-+      if (err)
-+              return err;
-+
-+      err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                               SPI_NFI_OPMODE,
-+                               FIELD_PREP(SPI_NFI_OPMODE, 3));
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-+                            SPI_NFI_DMA_MODE);
-+      if (err)
-+              return err;
-+
-+      err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80);
-+      if (err)
-+              return err;
-+
-+      err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                              SPI_NFI_WR_TRIG);
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-+                            SPI_NFI_WR_TRIG);
-+      if (err)
-+              return err;
-+
-+      err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
-+                                     val, (val & SPI_NFI_AHB_DONE), 0,
-+                                     1 * USEC_PER_SEC);
-+      if (err)
-+              return err;
-+
-+      err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
-+                                     REG_SPI_NFI_SNF_STA_CTL1, val,
-+                                     (val & SPI_NFI_LOAD_TO_CACHE_DONE),
-+                                     0, 1 * USEC_PER_SEC);
-+      if (err)
-+              return err;
-+
-+      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
-+                            SPI_NFI_LOAD_TO_CACHE_DONE);
-+      if (err)
-+              return err;
-+
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-+      if (err < 0)
-+              return err;
-+
-+      return len;
-+}
-+
-+static int airoha_snand_exec_op(struct spi_mem *mem,
-+                              const struct spi_mem_op *op)
-+{
-+      struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
-+      u8 data[8], cmd, opcode = op->cmd.opcode;
-+      struct airoha_snand_ctrl *as_ctrl;
-+      int i, err;
-+
-+      as_ctrl = spi_controller_get_devdata(mem->spi->controller);
-+      if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
-+          op->addr.val == as_dev->cur_page_num) {
-+              as_dev->data_need_update = true;
-+      } else if (opcode == SPI_NAND_OP_PAGE_READ) {
-+              if (!as_dev->data_need_update &&
-+                  op->addr.val == as_dev->cur_page_num)
-+                      return 0;
-+
-+              as_dev->data_need_update = true;
-+              as_dev->cur_page_num = op->addr.val;
-+      }
-+
-+      /* switch to manual mode */
-+      err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-+      if (err < 0)
-+              return err;
-+
-+      err = airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_LOW);
-+      if (err < 0)
-+              return err;
-+
-+      /* opcode */
-+      err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode));
-+      if (err)
-+              return err;
-+
-+      /* addr part */
-+      cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
-+      put_unaligned_be64(op->addr.val, data);
-+
-+      for (i = ARRAY_SIZE(data) - op->addr.nbytes;
-+           i < ARRAY_SIZE(data); i++) {
-+              err = airoha_snand_write_data(as_ctrl, cmd, &data[i],
-+                                            sizeof(data[0]));
-+              if (err)
-+                      return err;
-+      }
-+
-+      /* dummy */
-+      data[0] = 0xff;
-+      for (i = 0; i < op->dummy.nbytes; i++) {
-+              err = airoha_snand_write_data(as_ctrl, 0x8, &data[0],
-+                                            sizeof(data[0]));
-+              if (err)
-+                      return err;
-+      }
-+
-+      /* data */
-+      if (op->data.dir == SPI_MEM_DATA_IN) {
-+              err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
-+                                           op->data.nbytes);
-+              if (err)
-+                      return err;
-+      } else {
-+              err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out,
-+                                            op->data.nbytes);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_HIGH);
-+}
-+
-+static const struct spi_controller_mem_ops airoha_snand_mem_ops = {
-+      .adjust_op_size = airoha_snand_adjust_op_size,
-+      .supports_op = airoha_snand_supports_op,
-+      .exec_op = airoha_snand_exec_op,
-+      .dirmap_create = airoha_snand_dirmap_create,
-+      .dirmap_read = airoha_snand_dirmap_read,
-+      .dirmap_write = airoha_snand_dirmap_write,
-+};
-+
-+static int airoha_snand_setup(struct spi_device *spi)
-+{
-+      struct airoha_snand_ctrl *as_ctrl;
-+      struct airoha_snand_dev *as_dev;
-+
-+      as_ctrl = spi_controller_get_devdata(spi->controller);
-+
-+      as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL);
-+      if (!as_dev)
-+              return -ENOMEM;
-+
-+      /* prepare device buffer */
-+      as_dev->buf_len = SPI_NAND_CACHE_SIZE;
-+      as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len,
-+                                      GFP_KERNEL);
-+      if (!as_dev->txrx_buf)
-+              return -ENOMEM;
-+
-+      as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf,
-+                                        as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
-+              return -ENOMEM;
-+
-+      as_dev->data_need_update = true;
-+      spi_set_ctldata(spi, as_dev);
-+
-+      return 0;
-+}
-+
-+static void airoha_snand_cleanup(struct spi_device *spi)
-+{
-+      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
-+      struct airoha_snand_ctrl *as_ctrl;
-+
-+      as_ctrl = spi_controller_get_devdata(spi->controller);
-+      dma_unmap_single(as_ctrl->dev, as_dev->dma_addr,
-+                       as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      spi_set_ctldata(spi, NULL);
-+}
-+
-+static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl)
-+{
-+      u32 val, sec_size, sec_num;
-+      int err;
-+
-+      err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, &val);
-+      if (err)
-+              return err;
-+
-+      sec_num = FIELD_GET(SPI_NFI_SEC_NUM, val);
-+
-+      err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE, &val);
-+      if (err)
-+              return err;
-+
-+      sec_size = FIELD_GET(SPI_NFI_CUS_SEC_SIZE, val);
-+
-+      /* init default value */
-+      as_ctrl->nfi_cfg.sec_size = sec_size;
-+      as_ctrl->nfi_cfg.sec_num = sec_num;
-+      as_ctrl->nfi_cfg.page_size = round_down(sec_size * sec_num, 1024);
-+      as_ctrl->nfi_cfg.spare_size = 16;
-+
-+      err = airoha_snand_nfi_init(as_ctrl);
-+      if (err)
-+              return err;
-+
-+      return airoha_snand_nfi_config(as_ctrl);
-+}
-+
-+static const struct regmap_config spi_ctrl_regmap_config = {
-+      .name           = "ctrl",
-+      .reg_bits       = 32,
-+      .val_bits       = 32,
-+      .reg_stride     = 4,
-+      .max_register   = REG_SPI_CTRL_NFI2SPI_EN,
-+};
-+
-+static const struct regmap_config spi_nfi_regmap_config = {
-+      .name           = "nfi",
-+      .reg_bits       = 32,
-+      .val_bits       = 32,
-+      .reg_stride     = 4,
-+      .max_register   = REG_SPI_NFI_SNF_NFI_CNFG,
-+};
-+
-+static const struct of_device_id airoha_snand_ids[] = {
-+      { .compatible   = "airoha,en7581-snand" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, airoha_snand_ids);
-+
-+static int airoha_snand_probe(struct platform_device *pdev)
-+{
-+      struct airoha_snand_ctrl *as_ctrl;
-+      struct device *dev = &pdev->dev;
-+      struct spi_controller *ctrl;
-+      void __iomem *base;
-+      int err;
-+
-+      ctrl = devm_spi_alloc_host(dev, sizeof(*as_ctrl));
-+      if (!ctrl)
-+              return -ENOMEM;
-+
-+      as_ctrl = spi_controller_get_devdata(ctrl);
-+      as_ctrl->dev = dev;
-+
-+      base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      as_ctrl->regmap_ctrl = devm_regmap_init_mmio(dev, base,
-+                                                   &spi_ctrl_regmap_config);
-+      if (IS_ERR(as_ctrl->regmap_ctrl))
-+              return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_ctrl),
-+                                   "failed to init spi ctrl regmap\n");
-+
-+      base = devm_platform_ioremap_resource(pdev, 1);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      as_ctrl->regmap_nfi = devm_regmap_init_mmio(dev, base,
-+                                                  &spi_nfi_regmap_config);
-+      if (IS_ERR(as_ctrl->regmap_nfi))
-+              return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_nfi),
-+                                   "failed to init spi nfi regmap\n");
-+
-+      as_ctrl->spi_clk = devm_clk_get_enabled(dev, "spi");
-+      if (IS_ERR(as_ctrl->spi_clk))
-+              return dev_err_probe(dev, PTR_ERR(as_ctrl->spi_clk),
-+                                   "unable to get spi clk\n");
-+
-+      err = dma_set_mask(as_ctrl->dev, DMA_BIT_MASK(32));
-+      if (err)
-+              return err;
-+
-+      ctrl->num_chipselect = 2;
-+      ctrl->mem_ops = &airoha_snand_mem_ops;
-+      ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
-+      ctrl->mode_bits = SPI_RX_DUAL;
-+      ctrl->setup = airoha_snand_setup;
-+      ctrl->cleanup = airoha_snand_cleanup;
-+      device_set_node(&ctrl->dev, dev_fwnode(dev));
-+
-+      err = airoha_snand_nfi_setup(as_ctrl);
-+      if (err)
-+              return err;
-+
-+      return devm_spi_register_controller(dev, ctrl);
-+}
-+
-+static struct platform_driver airoha_snand_driver = {
-+      .driver = {
-+              .name = "airoha-spi",
-+              .of_match_table = airoha_snand_ids,
-+      },
-+      .probe = airoha_snand_probe,
-+};
-+module_platform_driver(airoha_snand_driver);
-+
-+MODULE_DESCRIPTION("Airoha SPI-NAND Flash Controller Driver");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_AUTHOR("Ray Liu <ray.liu@airoha.com>");
-+MODULE_LICENSE("GPL");
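For context, a hedged sketch of the kind of spi-mem operation this driver's exec_op path services in manual mode. The opcode comes from the SPI_NAND_OP_* list above; the feature address 0xc0 and the surrounding variables (mem, err) are assumptions made only for illustration:

    #include <linux/spi/spi-mem.h>

    /* GET_FEATURE (0x0f): 1-byte opcode, 1-byte address, one data byte in,
     * all single-line -- the shape accepted by airoha_snand_supports_op().
     */
    u8 status;
    struct spi_mem_op op =
	    SPI_MEM_OP(SPI_MEM_OP_CMD(SPI_NAND_OP_GET_FEATURE, 1),
		       SPI_MEM_OP_ADDR(1, 0xc0, 1),
		       SPI_MEM_OP_NO_DUMMY,
		       SPI_MEM_OP_DATA_IN(1, &status, 1));

    err = spi_mem_exec_op(mem, &op);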
diff --git a/target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch b/target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch
deleted file mode 100644 (file)
index dce013a..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-From 2e6bbfe7b0c0607001b784082c2685b134174fac Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 13 Sep 2024 23:07:13 +0200
-Subject: [PATCH 1/2] spi: airoha: fix dirmap_{read,write} operations
-
-The SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end of the
-dirmap_read operation even if it is already set.
-In the same way, the SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at
-the end of the dirmap_write operation even if it is already set.
-For this reason, use regmap_write_bits() instead of regmap_set_bits().
-This patch fixes the mtd_pagetest kernel module test.
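A minimal sketch of the behavioural difference (map, REG_STA and the bit are placeholders): regmap_set_bits() is routed through regmap_update_bits() and may skip the bus access when the bits already read back as set, whereas regmap_write_bits() always issues the write, which is what the controller requires here.

    regmap_set_bits(map, REG_STA, BIT(25));		/* write may be skipped */
    regmap_write_bits(map, REG_STA, BIT(25), BIT(25));	/* write always issued */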
-
-Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
-Tested-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240913-airoha-spi-fixes-v1-1-de2e74ed4664@kernel.org
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- drivers/spi/spi-airoha-snfi.c | 18 ++++++++++++++----
- 1 file changed, 14 insertions(+), 4 deletions(-)
-
---- a/drivers/spi/spi-airoha-snfi.c
-+++ b/drivers/spi/spi-airoha-snfi.c
-@@ -739,8 +739,13 @@ static ssize_t airoha_snand_dirmap_read(
-       if (err)
-               return err;
--      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
--                            SPI_NFI_READ_FROM_CACHE_DONE);
-+      /*
-+       * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
-+       * of dirmap_read operation even if it is already set.
-+       */
-+      err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
-+                              SPI_NFI_READ_FROM_CACHE_DONE,
-+                              SPI_NFI_READ_FROM_CACHE_DONE);
-       if (err)
-               return err;
-@@ -870,8 +875,13 @@ static ssize_t airoha_snand_dirmap_write
-       if (err)
-               return err;
--      err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
--                            SPI_NFI_LOAD_TO_CACHE_DONE);
-+      /*
-+       * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
-+       * of dirmap_write operation even if it is already set.
-+       */
-+      err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
-+                              SPI_NFI_LOAD_TO_CACHE_DONE,
-+                              SPI_NFI_LOAD_TO_CACHE_DONE);
-       if (err)
-               return err;
diff --git a/target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch b/target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch
deleted file mode 100644 (file)
index 738cb0c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-From 0e58637eb968c636725dcd6c7055249b4e5326fb Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 13 Sep 2024 23:07:14 +0200
-Subject: [PATCH 2/2] spi: airoha: fix airoha_snand_{write,read}_data data_len
- estimation
-
-Fix the data length written and read in the airoha_snand_write_data and
-airoha_snand_read_data routines, respectively, when it is larger than
-SPI_MAX_TRANSFER_SIZE.
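A short sketch of the corrected chunking loop, assuming a hypothetical transfer_chunk() helper in place of the driver's FIFO calls:

    for (i = 0; i < len; i += data_len) {
	    /* Account for the bytes already transferred; min(len, ...) alone
	     * makes the final chunk run past the end of the buffer whenever
	     * len > SPI_MAX_TRANSFER_SIZE.
	     */
	    data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
	    err = transfer_chunk(&data[i], data_len);	/* hypothetical helper */
	    if (err)
		    return err;
    }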
-
-Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
-Tested-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240913-airoha-spi-fixes-v1-2-de2e74ed4664@kernel.org
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- drivers/spi/spi-airoha-snfi.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/spi/spi-airoha-snfi.c
-+++ b/drivers/spi/spi-airoha-snfi.c
-@@ -405,7 +405,7 @@ static int airoha_snand_write_data(struc
-       for (i = 0; i < len; i += data_len) {
-               int err;
--              data_len = min(len, SPI_MAX_TRANSFER_SIZE);
-+              data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
-               err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
-               if (err)
-                       return err;
-@@ -427,7 +427,7 @@ static int airoha_snand_read_data(struct
-       for (i = 0; i < len; i += data_len) {
-               int err;
--              data_len = min(len, SPI_MAX_TRANSFER_SIZE);
-+              data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
-               err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
-               if (err)
-                       return err;
diff --git a/target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch b/target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch
deleted file mode 100644 (file)
index d2d2b54..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-From fffca269e4f31c3633c6d810833ba1b184407915 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 19 Sep 2024 18:57:16 +0200
-Subject: [PATCH] spi: airoha: remove read cache in airoha_snand_dirmap_read()
-
-Current upstream driver reports errors running mtd_oobtest kernel module
-test:
-
-root@OpenWrt:/# insmod mtd_test.ko
-root@OpenWrt:/# insmod mtd_oobtest.ko dev=5
-[ 7023.730584] =================================================
-[ 7023.736399] mtd_oobtest: MTD device: 5
-[ 7023.740160] mtd_oobtest: MTD device size 3670016, eraseblock size 131072, page size 2048, count of eraseblocks 28, pages per eraseblock 64, OOB size 128
-[ 7023.753837] mtd_test: scanning for bad eraseblocks
-[ 7023.758636] mtd_test: scanned 28 eraseblocks, 0 are bad
-[ 7023.763861] mtd_oobtest: test 1 of 5
-[ 7024.042076] mtd_oobtest: writing OOBs of whole device
-[ 7024.682069] mtd_oobtest: written up to eraseblock 0
-[ 7041.962077] mtd_oobtest: written 28 eraseblocks
-[ 7041.966626] mtd_oobtest: verifying all eraseblocks
-[ 7041.972276] mtd_oobtest: error @addr[0x0:0x0] 0xff -> 0xe diff 0xf1
-[ 7041.978550] mtd_oobtest: error @addr[0x0:0x1] 0xff -> 0x10 diff 0xef
-[ 7041.984932] mtd_oobtest: error @addr[0x0:0x2] 0xff -> 0x82 diff 0x7d
-[ 7041.991293] mtd_oobtest: error @addr[0x0:0x3] 0xff -> 0x10 diff 0xef
-[ 7041.997659] mtd_oobtest: error @addr[0x0:0x4] 0xff -> 0x0 diff 0xff
-[ 7042.003942] mtd_oobtest: error @addr[0x0:0x5] 0xff -> 0x8a diff 0x75
-[ 7042.010294] mtd_oobtest: error @addr[0x0:0x6] 0xff -> 0x20 diff 0xdf
-[ 7042.016659] mtd_oobtest: error @addr[0x0:0x7] 0xff -> 0x1 diff 0xfe
-[ 7042.022935] mtd_oobtest: error @addr[0x0:0x8] 0xff -> 0x2e diff 0xd1
-[ 7042.029295] mtd_oobtest: error @addr[0x0:0x9] 0xff -> 0x40 diff 0xbf
-[ 7042.035661] mtd_oobtest: error @addr[0x0:0xa] 0xff -> 0x0 diff 0xff
-[ 7042.041935] mtd_oobtest: error @addr[0x0:0xb] 0xff -> 0x89 diff 0x76
-[ 7042.048300] mtd_oobtest: error @addr[0x0:0xc] 0xff -> 0x82 diff 0x7d
-[ 7042.054662] mtd_oobtest: error @addr[0x0:0xd] 0xff -> 0x15 diff 0xea
-[ 7042.061014] mtd_oobtest: error @addr[0x0:0xe] 0xff -> 0x90 diff 0x6f
-[ 7042.067380] mtd_oobtest: error @addr[0x0:0xf] 0xff -> 0x0 diff 0xff
-....
-[ 7432.421369] mtd_oobtest: error @addr[0x237800:0x36] 0xff -> 0x5f diff 0xa0
-[ 7432.428242] mtd_oobtest: error @addr[0x237800:0x37] 0xff -> 0x21 diff 0xde
-[ 7432.435118] mtd_oobtest: error: verify failed at 0x237800
-[ 7432.440510] mtd_oobtest: error: too many errors
-[ 7432.445053] mtd_oobtest: error -1 occurred
-
-The above errors are due to buggy logic in the 'read cache' used by the
-airoha_snand_dirmap_read() routine: there are corner cases where data
-updates are missed. Since the cache provides no read/write speed
-improvement (according to the mtd_speedtest kernel module test), fix the
-mtd_oobtest failure by removing the 'read cache' from the
-airoha_snand_dirmap_read() routine. The driver now passes all the tests
-available in the mtd_test suite.
-
-Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
-Tested-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240919-airoha-spi-fixes-v2-1-cb0f0ed9920a@kernel.org
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- drivers/spi/spi-airoha-snfi.c | 21 ---------------------
- 1 file changed, 21 deletions(-)
-
---- a/drivers/spi/spi-airoha-snfi.c
-+++ b/drivers/spi/spi-airoha-snfi.c
-@@ -211,9 +211,6 @@ struct airoha_snand_dev {
-       u8 *txrx_buf;
-       dma_addr_t dma_addr;
--
--      u64 cur_page_num;
--      bool data_need_update;
- };
- struct airoha_snand_ctrl {
-@@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(
-       u32 val, rd_mode;
-       int err;
--      if (!as_dev->data_need_update)
--              return len;
--
--      as_dev->data_need_update = false;
--
-       switch (op->cmd.opcode) {
-       case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
-               rd_mode = 1;
-@@ -895,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write
- static int airoha_snand_exec_op(struct spi_mem *mem,
-                               const struct spi_mem_op *op)
- {
--      struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
-       u8 data[8], cmd, opcode = op->cmd.opcode;
-       struct airoha_snand_ctrl *as_ctrl;
-       int i, err;
-       as_ctrl = spi_controller_get_devdata(mem->spi->controller);
--      if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
--          op->addr.val == as_dev->cur_page_num) {
--              as_dev->data_need_update = true;
--      } else if (opcode == SPI_NAND_OP_PAGE_READ) {
--              if (!as_dev->data_need_update &&
--                  op->addr.val == as_dev->cur_page_num)
--                      return 0;
--
--              as_dev->data_need_update = true;
--              as_dev->cur_page_num = op->addr.val;
--      }
-       /* switch to manual mode */
-       err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-@@ -996,7 +976,6 @@ static int airoha_snand_setup(struct spi
-       if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
-               return -ENOMEM;
--      as_dev->data_need_update = true;
-       spi_set_ctldata(spi, as_dev);
-       return 0;
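The removed cache was keyed only on the last page number, so any write path that
modified a cached page without refreshing the cache could leave stale bytes behind,
which is the class of corner case the commit message refers to. A simplified,
hypothetical illustration of that hazard (not the driver's actual structures):

struct page_cache {
	unsigned long long cur_page;
	int valid;
	unsigned char data[2048];
};

static const unsigned char *cached_read(struct page_cache *c,
					unsigned long long page,
					void (*load_page)(unsigned long long page,
							  unsigned char *dst))
{
	if (!c->valid || c->cur_page != page) {
		load_page(page, c->data);
		c->cur_page = page;
		c->valid = 1;
	}
	/*
	 * If 'page' was rewritten meanwhile through a path that did not
	 * invalidate the cache, this returns stale data.
	 */
	return c->data;
}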
diff --git a/target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch b/target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch
deleted file mode 100644 (file)
index 71e920c..0000000
+++ /dev/null
@@ -1,435 +0,0 @@
-From 7a4b3ebf1d60349587fee21872536e7bd6a4cf39 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sun, 22 Sep 2024 19:38:30 +0200
-Subject: [PATCH] spi: airoha: do not keep {tx,rx} dma buffer always mapped
-
-DMA map txrx_buf on demand in the airoha_snand_dirmap_read and
-airoha_snand_dirmap_write routines instead of keeping it always mapped.
-This patch does not fix any bug or introduce any functional change to
-the driver; it just simplifies the code and improves readability without
-introducing any performance degradation, according to the results
-obtained from the mtd_speedtest kernel module test.
-
-root@OpenWrt:# insmod mtd_test.ko
-root@OpenWrt:# insmod mtd_speedtest.ko dev=5
-[   49.849869] =================================================
-[   49.855659] mtd_speedtest: MTD device: 5
-[   49.859583] mtd_speedtest: MTD device size 8388608, eraseblock size 131072, page size 2048, count of eraseblocks 64, pages per eraseblock 64, OOB size 128
-[   49.874622] mtd_test: scanning for bad eraseblocks
-[   49.879433] mtd_test: scanned 64 eraseblocks, 0 are bad
-[   50.106372] mtd_speedtest: testing eraseblock write speed
-[   53.083380] mtd_speedtest: eraseblock write speed is 2756 KiB/s
-[   53.089322] mtd_speedtest: testing eraseblock read speed
-[   54.143360] mtd_speedtest: eraseblock read speed is 7811 KiB/s
-[   54.370365] mtd_speedtest: testing page write speed
-[   57.349480] mtd_speedtest: page write speed is 2754 KiB/s
-[   57.354895] mtd_speedtest: testing page read speed
-[   58.410431] mtd_speedtest: page read speed is 7796 KiB/s
-[   58.636805] mtd_speedtest: testing 2 page write speed
-[   61.612427] mtd_speedtest: 2 page write speed is 2757 KiB/s
-[   61.618021] mtd_speedtest: testing 2 page read speed
-[   62.672653] mtd_speedtest: 2 page read speed is 7804 KiB/s
-[   62.678159] mtd_speedtest: Testing erase speed
-[   62.903617] mtd_speedtest: erase speed is 37063 KiB/s
-[   62.908678] mtd_speedtest: Testing 2x multi-block erase speed
-[   63.134083] mtd_speedtest: 2x multi-block erase speed is 37292 KiB/s
-[   63.140442] mtd_speedtest: Testing 4x multi-block erase speed
-[   63.364262] mtd_speedtest: 4x multi-block erase speed is 37566 KiB/s
-[   63.370632] mtd_speedtest: Testing 8x multi-block erase speed
-[   63.595740] mtd_speedtest: 8x multi-block erase speed is 37344 KiB/s
-[   63.602089] mtd_speedtest: Testing 16x multi-block erase speed
-[   63.827426] mtd_speedtest: 16x multi-block erase speed is 37320 KiB/s
-[   63.833860] mtd_speedtest: Testing 32x multi-block erase speed
-[   64.059389] mtd_speedtest: 32x multi-block erase speed is 37288 KiB/s
-[   64.065833] mtd_speedtest: Testing 64x multi-block erase speed
-[   64.290609] mtd_speedtest: 64x multi-block erase speed is 37415 KiB/s
-[   64.297063] mtd_speedtest: finished
-[   64.300555] =================================================
-
-Tested-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20240922-airoha-spi-fixes-v3-1-f958802b3d68@kernel.org
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- drivers/spi/spi-airoha-snfi.c | 154 ++++++++++++++++------------------
- 1 file changed, 71 insertions(+), 83 deletions(-)
-
---- a/drivers/spi/spi-airoha-snfi.c
-+++ b/drivers/spi/spi-airoha-snfi.c
-@@ -206,13 +206,6 @@ enum airoha_snand_cs {
-       SPI_CHIP_SEL_LOW,
- };
--struct airoha_snand_dev {
--      size_t buf_len;
--
--      u8 *txrx_buf;
--      dma_addr_t dma_addr;
--};
--
- struct airoha_snand_ctrl {
-       struct device *dev;
-       struct regmap *regmap_ctrl;
-@@ -617,9 +610,9 @@ static bool airoha_snand_supports_op(str
- static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
- {
--      struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi);
-+      u8 *txrx_buf = spi_get_ctldata(desc->mem->spi);
--      if (!as_dev->txrx_buf)
-+      if (!txrx_buf)
-               return -EINVAL;
-       if (desc->info.offset + desc->info.length > U32_MAX)
-@@ -634,10 +627,11 @@ static int airoha_snand_dirmap_create(st
- static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
-                                       u64 offs, size_t len, void *buf)
- {
--      struct spi_device *spi = desc->mem->spi;
--      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
-       struct spi_mem_op *op = &desc->info.op_tmpl;
-+      struct spi_device *spi = desc->mem->spi;
-       struct airoha_snand_ctrl *as_ctrl;
-+      u8 *txrx_buf = spi_get_ctldata(spi);
-+      dma_addr_t dma_addr;
-       u32 val, rd_mode;
-       int err;
-@@ -662,14 +656,17 @@ static ssize_t airoha_snand_dirmap_read(
-       if (err)
-               return err;
--      dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
--                                 as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
-+                                DMA_FROM_DEVICE);
-+      err = dma_mapping_error(as_ctrl->dev, dma_addr);
-+      if (err)
-+              return err;
-       /* set dma addr */
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
--                         as_dev->dma_addr);
-+                         dma_addr);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* set cust sec size */
-       val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num;
-@@ -678,58 +675,58 @@ static ssize_t airoha_snand_dirmap_read(
-                                REG_SPI_NFI_SNF_MISC_CTL2,
-                                SPI_NFI_READ_DATA_BYTE_NUM, val);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* set read command */
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2,
-                          op->cmd.opcode);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* set read mode */
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
-                          FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode));
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* set read addr */
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* set nfi read */
-       err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-                                SPI_NFI_OPMODE,
-                                FIELD_PREP(SPI_NFI_OPMODE, 6));
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-                             SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* trigger dma start read */
-       err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-                               SPI_NFI_RD_TRIG);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-                             SPI_NFI_RD_TRIG);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
-                                      REG_SPI_NFI_SNF_STA_CTL1, val,
-                                      (val & SPI_NFI_READ_FROM_CACHE_DONE),
-                                      0, 1 * USEC_PER_SEC);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /*
-        * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
-@@ -739,35 +736,41 @@ static ssize_t airoha_snand_dirmap_read(
-                               SPI_NFI_READ_FROM_CACHE_DONE,
-                               SPI_NFI_READ_FROM_CACHE_DONE);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
-                                      val, (val & SPI_NFI_AHB_DONE), 0,
-                                      1 * USEC_PER_SEC);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /* DMA read need delay for data ready from controller to DRAM */
-       udelay(1);
--      dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
--                              as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
-+                       DMA_FROM_DEVICE);
-       err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-       if (err < 0)
-               return err;
--      memcpy(buf, as_dev->txrx_buf + offs, len);
-+      memcpy(buf, txrx_buf + offs, len);
-       return len;
-+
-+error_dma_unmap:
-+      dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
-+                       DMA_FROM_DEVICE);
-+      return err;
- }
- static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
-                                        u64 offs, size_t len, const void *buf)
- {
--      struct spi_device *spi = desc->mem->spi;
--      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
-       struct spi_mem_op *op = &desc->info.op_tmpl;
-+      struct spi_device *spi = desc->mem->spi;
-+      u8 *txrx_buf = spi_get_ctldata(spi);
-       struct airoha_snand_ctrl *as_ctrl;
-+      dma_addr_t dma_addr;
-       u32 wr_mode, val;
-       int err;
-@@ -776,19 +779,20 @@ static ssize_t airoha_snand_dirmap_write
-       if (err < 0)
-               return err;
--      dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
--                              as_dev->buf_len, DMA_BIDIRECTIONAL);
--      memcpy(as_dev->txrx_buf + offs, buf, len);
--      dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
--                                 as_dev->buf_len, DMA_BIDIRECTIONAL);
-+      memcpy(txrx_buf + offs, buf, len);
-+      dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
-+                                DMA_TO_DEVICE);
-+      err = dma_mapping_error(as_ctrl->dev, dma_addr);
-+      if (err)
-+              return err;
-       err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
-       if (err < 0)
--              return err;
-+              goto error_dma_unmap;
-       err = airoha_snand_nfi_config(as_ctrl);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD ||
-           op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD)
-@@ -797,9 +801,9 @@ static ssize_t airoha_snand_dirmap_write
-               wr_mode = 0;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
--                         as_dev->dma_addr);
-+                         dma_addr);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM,
-                        as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num);
-@@ -807,65 +811,65 @@ static ssize_t airoha_snand_dirmap_write
-                                REG_SPI_NFI_SNF_MISC_CTL2,
-                                SPI_NFI_PROG_LOAD_BYTE_NUM, val);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1,
-                          FIELD_PREP(SPI_NFI_PG_LOAD_CMD,
-                                     op->cmd.opcode));
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
-                          FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode));
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-                               SPI_NFI_READ_MODE);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-                                SPI_NFI_OPMODE,
-                                FIELD_PREP(SPI_NFI_OPMODE, 3));
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
-                             SPI_NFI_DMA_MODE);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-                               SPI_NFI_WR_TRIG);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
-                             SPI_NFI_WR_TRIG);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
-                                      val, (val & SPI_NFI_AHB_DONE), 0,
-                                      1 * USEC_PER_SEC);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
-                                      REG_SPI_NFI_SNF_STA_CTL1, val,
-                                      (val & SPI_NFI_LOAD_TO_CACHE_DONE),
-                                      0, 1 * USEC_PER_SEC);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-       /*
-        * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
-@@ -875,13 +879,20 @@ static ssize_t airoha_snand_dirmap_write
-                               SPI_NFI_LOAD_TO_CACHE_DONE,
-                               SPI_NFI_LOAD_TO_CACHE_DONE);
-       if (err)
--              return err;
-+              goto error_dma_unmap;
-+      dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
-+                       DMA_TO_DEVICE);
-       err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
-       if (err < 0)
-               return err;
-       return len;
-+
-+error_dma_unmap:
-+      dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
-+                       DMA_TO_DEVICE);
-+      return err;
- }
- static int airoha_snand_exec_op(struct spi_mem *mem,
-@@ -956,42 +967,20 @@ static const struct spi_controller_mem_o
- static int airoha_snand_setup(struct spi_device *spi)
- {
-       struct airoha_snand_ctrl *as_ctrl;
--      struct airoha_snand_dev *as_dev;
--
--      as_ctrl = spi_controller_get_devdata(spi->controller);
--
--      as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL);
--      if (!as_dev)
--              return -ENOMEM;
-+      u8 *txrx_buf;
-       /* prepare device buffer */
--      as_dev->buf_len = SPI_NAND_CACHE_SIZE;
--      as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len,
--                                      GFP_KERNEL);
--      if (!as_dev->txrx_buf)
--              return -ENOMEM;
--
--      as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf,
--                                        as_dev->buf_len, DMA_BIDIRECTIONAL);
--      if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
-+      as_ctrl = spi_controller_get_devdata(spi->controller);
-+      txrx_buf = devm_kzalloc(as_ctrl->dev, SPI_NAND_CACHE_SIZE,
-+                              GFP_KERNEL);
-+      if (!txrx_buf)
-               return -ENOMEM;
--      spi_set_ctldata(spi, as_dev);
-+      spi_set_ctldata(spi, txrx_buf);
-       return 0;
- }
--static void airoha_snand_cleanup(struct spi_device *spi)
--{
--      struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
--      struct airoha_snand_ctrl *as_ctrl;
--
--      as_ctrl = spi_controller_get_devdata(spi->controller);
--      dma_unmap_single(as_ctrl->dev, as_dev->dma_addr,
--                       as_dev->buf_len, DMA_BIDIRECTIONAL);
--      spi_set_ctldata(spi, NULL);
--}
--
- static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl)
- {
-       u32 val, sec_size, sec_num;
-@@ -1093,7 +1082,6 @@ static int airoha_snand_probe(struct pla
-       ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
-       ctrl->mode_bits = SPI_RX_DUAL;
-       ctrl->setup = airoha_snand_setup;
--      ctrl->cleanup = airoha_snand_cleanup;
-       device_set_node(&ctrl->dev, dev_fwnode(dev));
-       err = airoha_snand_nfi_setup(as_ctrl);
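The patch above replaces the long-lived bidirectional mapping with one created per
transfer and released on every exit path; the many error branches funnel into a single
unmap label. A condensed sketch of that map-on-demand pattern, assuming kernel context
and with a generic do_io() callback standing in for the register programming:

#include <linux/dma-mapping.h>

static int xfer_with_temp_mapping(struct device *dev, void *buf, size_t len,
				  int (*do_io)(struct device *dev,
					       dma_addr_t addr))
{
	dma_addr_t dma_addr;
	int err;

	/* Map only for the duration of this transfer. */
	dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	err = dma_mapping_error(dev, dma_addr);
	if (err)
		return err;

	/* Any failure in here must still release the mapping. */
	err = do_io(dev, dma_addr);

	dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
	return err;
}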
diff --git a/target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch b/target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch
deleted file mode 100644 (file)
index d071639..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-From 2b0229f67932e4b9e2f458bf286903582bd30740 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 1 Aug 2024 09:35:12 +0200
-Subject: [PATCH] net: dsa: mt7530: Add EN7581 support
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Introduce support for the DSA built-in switch available on the EN7581
-development board. EN7581 support is similar to the MT7988 one, except
-that it requires setting the MT7530_FORCE_MODE bit in the MT753X_PMCR_P
-register for the CPU port.
-
-Tested-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Arınç ÜNAL <arinc.unal@arinc9.com>
-Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/mt7530-mmio.c |  1 +
- drivers/net/dsa/mt7530.c      | 49 ++++++++++++++++++++++++++++++-----
- drivers/net/dsa/mt7530.h      | 20 ++++++++++----
- 3 files changed, 59 insertions(+), 11 deletions(-)
-
---- a/drivers/net/dsa/mt7530-mmio.c
-+++ b/drivers/net/dsa/mt7530-mmio.c
-@@ -11,6 +11,7 @@
- #include "mt7530.h"
- static const struct of_device_id mt7988_of_match[] = {
-+      { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
-       { .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
-       { /* sentinel */ },
- };
---- a/drivers/net/dsa/mt7530.c
-+++ b/drivers/net/dsa/mt7530.c
-@@ -1152,7 +1152,8 @@ mt753x_cpu_port_enable(struct dsa_switch
-        * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
-        * is affine to the inbound user port.
-        */
--      if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
-+      if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
-+          priv->id == ID_EN7581)
-               mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
-       /* CPU port gets connected to all user ports of
-@@ -2207,7 +2208,7 @@ mt7530_setup_irq(struct mt7530_priv *pri
-               return priv->irq ? : -EINVAL;
-       }
--      if (priv->id == ID_MT7988)
-+      if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
-               priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
-                                                        &mt7988_irq_domain_ops,
-                                                        priv);
-@@ -2438,8 +2439,10 @@ mt7530_setup(struct dsa_switch *ds)
-               /* Clear link settings and enable force mode to force link down
-                * on all ports until they're enabled later.
-                */
--              mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
--                         MT7530_FORCE_MODE, MT7530_FORCE_MODE);
-+              mt7530_rmw(priv, MT753X_PMCR_P(i),
-+                         PMCR_LINK_SETTINGS_MASK |
-+                         MT753X_FORCE_MODE(priv->id),
-+                         MT753X_FORCE_MODE(priv->id));
-               /* Disable forwarding by default on all ports */
-               mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
-@@ -2553,8 +2556,10 @@ mt7531_setup_common(struct dsa_switch *d
-               /* Clear link settings and enable force mode to force link down
-                * on all ports until they're enabled later.
-                */
--              mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
--                         MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
-+              mt7530_rmw(priv, MT753X_PMCR_P(i),
-+                         PMCR_LINK_SETTINGS_MASK |
-+                         MT753X_FORCE_MODE(priv->id),
-+                         MT753X_FORCE_MODE(priv->id));
-               /* Disable forwarding by default on all ports */
-               mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
-@@ -2779,6 +2784,28 @@ static void mt7988_mac_port_get_caps(str
-       }
- }
-+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
-+                                   struct phylink_config *config)
-+{
-+      switch (port) {
-+      /* Ports which are connected to switch PHYs. There is no MII pinout. */
-+      case 0 ... 4:
-+              __set_bit(PHY_INTERFACE_MODE_INTERNAL,
-+                        config->supported_interfaces);
-+
-+              config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
-+              break;
-+
-+      /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
-+      case 6:
-+              __set_bit(PHY_INTERFACE_MODE_INTERNAL,
-+                        config->supported_interfaces);
-+
-+              config->mac_capabilities |= MAC_10000FD;
-+              break;
-+      }
-+}
-+
- static void
- mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
-                 phy_interface_t interface)
-@@ -3216,6 +3243,16 @@ const struct mt753x_info mt753x_table[]
-               .phy_write_c45 = mt7531_ind_c45_phy_write,
-               .mac_port_get_caps = mt7988_mac_port_get_caps,
-       },
-+      [ID_EN7581] = {
-+              .id = ID_EN7581,
-+              .pcs_ops = &mt7530_pcs_ops,
-+              .sw_setup = mt7988_setup,
-+              .phy_read_c22 = mt7531_ind_c22_phy_read,
-+              .phy_write_c22 = mt7531_ind_c22_phy_write,
-+              .phy_read_c45 = mt7531_ind_c45_phy_read,
-+              .phy_write_c45 = mt7531_ind_c45_phy_write,
-+              .mac_port_get_caps = en7581_mac_port_get_caps,
-+      },
- };
- EXPORT_SYMBOL_GPL(mt753x_table);
---- a/drivers/net/dsa/mt7530.h
-+++ b/drivers/net/dsa/mt7530.h
-@@ -19,6 +19,7 @@ enum mt753x_id {
-       ID_MT7621 = 1,
-       ID_MT7531 = 2,
-       ID_MT7988 = 3,
-+      ID_EN7581 = 4,
- };
- #define       NUM_TRGMII_CTRL                 5
-@@ -64,25 +65,30 @@ enum mt753x_id {
- #define  MT7531_CPU_PMAP(x)           FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
- #define MT753X_MIRROR_REG(id)         ((id == ID_MT7531 || \
--                                        id == ID_MT7988) ? \
-+                                        id == ID_MT7988 || \
-+                                        id == ID_EN7581) ? \
-                                        MT7531_CFC : MT753X_MFC)
- #define MT753X_MIRROR_EN(id)          ((id == ID_MT7531 || \
--                                        id == ID_MT7988) ? \
-+                                        id == ID_MT7988 || \
-+                                        id == ID_EN7581) ? \
-                                        MT7531_MIRROR_EN : MT7530_MIRROR_EN)
- #define MT753X_MIRROR_PORT_MASK(id)   ((id == ID_MT7531 || \
--                                        id == ID_MT7988) ? \
-+                                        id == ID_MT7988 || \
-+                                        id == ID_EN7581) ? \
-                                        MT7531_MIRROR_PORT_MASK : \
-                                        MT7530_MIRROR_PORT_MASK)
- #define MT753X_MIRROR_PORT_GET(id, val)       ((id == ID_MT7531 || \
--                                        id == ID_MT7988) ? \
-+                                        id == ID_MT7988 || \
-+                                        id == ID_EN7581) ? \
-                                        MT7531_MIRROR_PORT_GET(val) : \
-                                        MT7530_MIRROR_PORT_GET(val))
- #define MT753X_MIRROR_PORT_SET(id, val)       ((id == ID_MT7531 || \
--                                        id == ID_MT7988) ? \
-+                                        id == ID_MT7988 || \
-+                                        id == ID_EN7581) ? \
-                                        MT7531_MIRROR_PORT_SET(val) : \
-                                        MT7530_MIRROR_PORT_SET(val))
-@@ -355,6 +361,10 @@ enum mt7530_vlan_port_acc_frm {
-                                        MT7531_FORCE_MODE_TX_FC | \
-                                        MT7531_FORCE_MODE_EEE100 | \
-                                        MT7531_FORCE_MODE_EEE1G)
-+#define  MT753X_FORCE_MODE(id)                ((id == ID_MT7531 || \
-+                                        id == ID_MT7988) ? \
-+                                       MT7531_FORCE_MODE_MASK : \
-+                                       MT7530_FORCE_MODE)
- #define  PMCR_LINK_SETTINGS_MASK      (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
-                                        PMCR_FORCE_EEE1G | \
-                                        PMCR_FORCE_EEE100 | \
diff --git a/target/linux/airoha/patches-6.6/030-v6.13-hwrng-airoha-add-support-for-Airoha-EN7581-TRNG.patch b/target/linux/airoha/patches-6.6/030-v6.13-hwrng-airoha-add-support-for-Airoha-EN7581-TRNG.patch
deleted file mode 100644 (file)
index e21fb56..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-From 5c5db81bff81a0fcd9ad998543d4241cbfe4742f Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 17 Oct 2024 14:44:38 +0200
-Subject: [PATCH 2/2] hwrng: airoha - add support for Airoha EN7581 TRNG
-
-Add support for the Airoha TRNG. The Airoha SoC provides a True RNG
-module that can output 4 bytes of raw data at a time.
-
-The module makes use of various noise sources to provide True Random
-Number Generation.
-
-On probe, the module is reset to run its Health Test and verify that it
-executes correctly.
-
-The module can also provide a DRBG function, but the execution modes are
-mutually exclusive: running as a TRNG does not permit also running it as
-a DRBG.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Reviewed-by: Martin Kaiser <martin@kaiser.cx>
-Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
----
- drivers/char/hw_random/Kconfig       |  13 ++
- drivers/char/hw_random/Makefile      |   1 +
- drivers/char/hw_random/airoha-trng.c | 243 +++++++++++++++++++++++++++
- 3 files changed, 257 insertions(+)
- create mode 100644 drivers/char/hw_random/airoha-trng.c
-
---- a/drivers/char/hw_random/Kconfig
-+++ b/drivers/char/hw_random/Kconfig
-@@ -62,6 +62,19 @@ config HW_RANDOM_AMD
-         If unsure, say Y.
-+config HW_RANDOM_AIROHA
-+      tristate "Airoha True HW Random Number Generator support"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      default HW_RANDOM
-+      help
-+        This driver provides kernel-side support for the True Random Number
-+        Generator hardware found on Airoha SoC.
-+
-+        To compile this driver as a module, choose M here: the
-+        module will be called airoha-rng.
-+
-+        If unsure, say Y.
-+
- config HW_RANDOM_ATMEL
-       tristate "Atmel Random Number Generator support"
-       depends on (ARCH_AT91 || COMPILE_TEST)
---- a/drivers/char/hw_random/Makefile
-+++ b/drivers/char/hw_random/Makefile
-@@ -8,6 +8,7 @@ rng-core-y := core.o
- obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
- obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
- obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
-+obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
- obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
- obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
- obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
---- /dev/null
-+++ b/drivers/char/hw_random/airoha-trng.c
-@@ -0,0 +1,243 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/* Copyright (C) 2024 Christian Marangi */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/bitfield.h>
-+#include <linux/delay.h>
-+#include <linux/hw_random.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/iopoll.h>
-+#include <linux/platform_device.h>
-+
-+#define TRNG_IP_RDY                   0x800
-+#define   CNT_TRANS                   GENMASK(15, 8)
-+#define   SAMPLE_RDY                  BIT(0)
-+#define TRNG_NS_SEK_AND_DAT_EN                0x804
-+#define         RNG_EN                        BIT(31) /* referenced as ring_en */
-+#define         RAW_DATA_EN                   BIT(16)
-+#define TRNG_HEALTH_TEST_SW_RST               0x808
-+#define   SW_RST                      BIT(0) /* Active High */
-+#define TRNG_INTR_EN                  0x818
-+#define   INTR_MASK                   BIT(16)
-+#define   CONTINUOUS_HEALTH_INITR_EN  BIT(2)
-+#define   SW_STARTUP_INITR_EN         BIT(1)
-+#define   RST_STARTUP_INITR_EN                BIT(0)
-+/* Note that Health Tests are run only out of Reset and with RNG_EN set */
-+#define TRNG_HEALTH_TEST_STATUS               0x824
-+#define   CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
-+#define   CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
-+#define   SW_STARTUP_TEST_DONE                BIT(21)
-+#define   SW_STARTUP_AP_TEST_FAIL     BIT(20)
-+#define   SW_STARTUP_RC_TEST_FAIL     BIT(19)
-+#define   RST_STARTUP_TEST_DONE               BIT(18)
-+#define   RST_STARTUP_AP_TEST_FAIL    BIT(17)
-+#define   RST_STARTUP_RC_TEST_FAIL    BIT(16)
-+#define   RAW_DATA_VALID              BIT(7)
-+
-+#define TRNG_RAW_DATA_OUT             0x828
-+
-+#define TRNG_CNT_TRANS_VALID          0x80
-+#define BUSY_LOOP_SLEEP                       10
-+#define BUSY_LOOP_TIMEOUT             (BUSY_LOOP_SLEEP * 10000)
-+
-+struct airoha_trng {
-+      void __iomem *base;
-+      struct hwrng rng;
-+      struct device *dev;
-+
-+      struct completion rng_op_done;
-+};
-+
-+static int airoha_trng_irq_mask(struct airoha_trng *trng)
-+{
-+      u32 val;
-+
-+      val = readl(trng->base + TRNG_INTR_EN);
-+      val |= INTR_MASK;
-+      writel(val, trng->base + TRNG_INTR_EN);
-+
-+      return 0;
-+}
-+
-+static int airoha_trng_irq_unmask(struct airoha_trng *trng)
-+{
-+      u32 val;
-+
-+      val = readl(trng->base + TRNG_INTR_EN);
-+      val &= ~INTR_MASK;
-+      writel(val, trng->base + TRNG_INTR_EN);
-+
-+      return 0;
-+}
-+
-+static int airoha_trng_init(struct hwrng *rng)
-+{
-+      struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
-+      int ret;
-+      u32 val;
-+
-+      val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+      val |= RNG_EN;
-+      writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+
-+      /* Set out of SW Reset */
-+      airoha_trng_irq_unmask(trng);
-+      writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
-+
-+      ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
-+      if (ret <= 0) {
-+              dev_err(trng->dev, "Timeout waiting for Health Check\n");
-+              airoha_trng_irq_mask(trng);
-+              return -ENODEV;
-+      }
-+
-+      /* Check if Health Test Failed */
-+      val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
-+      if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
-+              dev_err(trng->dev, "Health Check fail: %s test fail\n",
-+                      val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
-+              return -ENODEV;
-+      }
-+
-+      /* Check if IP is ready */
-+      ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
-+                               val & SAMPLE_RDY, 10, 1000);
-+      if (ret < 0) {
-+              dev_err(trng->dev, "Timeout waiting for IP ready");
-+              return -ENODEV;
-+      }
-+
-+      /* CNT_TRANS must be 0x80 for IP to be considered ready */
-+      ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
-+                               FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
-+                               10, 1000);
-+      if (ret < 0) {
-+              dev_err(trng->dev, "Timeout waiting for IP ready");
-+              return -ENODEV;
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_trng_cleanup(struct hwrng *rng)
-+{
-+      struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
-+      u32 val;
-+
-+      val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+      val &= ~RNG_EN;
-+      writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+
-+      /* Put it in SW Reset */
-+      writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
-+}
-+
-+static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
-+{
-+      struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
-+      u32 *data = buf;
-+      u32 status;
-+      int ret;
-+
-+      ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
-+                               status & RAW_DATA_VALID, 10, 1000);
-+      if (ret < 0) {
-+              dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
-+              return ret;
-+      }
-+
-+      *data = readl(trng->base + TRNG_RAW_DATA_OUT);
-+
-+      return 4;
-+}
-+
-+static irqreturn_t airoha_trng_irq(int irq, void *priv)
-+{
-+      struct airoha_trng *trng = (struct airoha_trng *)priv;
-+
-+      airoha_trng_irq_mask(trng);
-+      /* Just complete the task, we will read the value later */
-+      complete(&trng->rng_op_done);
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static int airoha_trng_probe(struct platform_device *pdev)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct airoha_trng *trng;
-+      int irq, ret;
-+      u32 val;
-+
-+      trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
-+      if (!trng)
-+              return -ENOMEM;
-+
-+      trng->base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(trng->base))
-+              return PTR_ERR(trng->base);
-+
-+      irq = platform_get_irq(pdev, 0);
-+      if (irq < 0)
-+              return irq;
-+
-+      airoha_trng_irq_mask(trng);
-+      ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
-+                             pdev->name, (void *)trng);
-+      if (ret) {
-+              dev_err(dev, "Can't get interrupt working.\n");
-+              return ret;
-+      }
-+
-+      init_completion(&trng->rng_op_done);
-+
-+      /* Enable interrupt for SW reset Health Check */
-+      val = readl(trng->base + TRNG_INTR_EN);
-+      val |= RST_STARTUP_INITR_EN;
-+      writel(val, trng->base + TRNG_INTR_EN);
-+
-+      /* Set output to raw data */
-+      val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+      val |= RAW_DATA_EN;
-+      writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
-+
-+      /* Put it in SW Reset */
-+      writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
-+
-+      trng->dev = dev;
-+      trng->rng.name = pdev->name;
-+      trng->rng.init = airoha_trng_init;
-+      trng->rng.cleanup = airoha_trng_cleanup;
-+      trng->rng.read = airoha_trng_read;
-+
-+      ret = devm_hwrng_register(dev, &trng->rng);
-+      if (ret) {
-+              dev_err(dev, "failed to register rng device: %d\n", ret);
-+              return ret;
-+      }
-+
-+      return 0;
-+}
-+
-+static const struct of_device_id airoha_trng_of_match[] = {
-+      { .compatible = "airoha,en7581-trng", },
-+      {},
-+};
-+MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
-+
-+static struct platform_driver airoha_trng_driver = {
-+      .driver = {
-+              .name = "airoha-trng",
-+              .of_match_table = airoha_trng_of_match,
-+      },
-+      .probe = airoha_trng_probe,
-+};
-+
-+module_platform_driver(airoha_trng_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
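Once the probe above registers with the hwrng core, the TRNG feeds the kernel entropy
pool and its raw output is also reachable through the core's standard character device.
A small userspace sketch, assuming /dev/hwrng is backed by this device:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t sample;
	int fd = open("/dev/hwrng", O_RDONLY);

	/* Read one 4-byte sample, matching the TRNG's raw output width. */
	if (fd < 0 || read(fd, &sample, sizeof(sample)) != sizeof(sample)) {
		perror("hwrng");
		return 1;
	}
	printf("raw sample: 0x%08x\n", sample);
	close(fd);
	return 0;
}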
diff --git a/target/linux/airoha/patches-6.6/031-01-v6.13-net-airoha-Read-completion-queue-data-in-airoha_qdma.patch b/target/linux/airoha/patches-6.6/031-01-v6.13-net-airoha-Read-completion-queue-data-in-airoha_qdma.patch
deleted file mode 100644 (file)
index 306ce65..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 3affa310de523d63e52ea8e2efb3c476df29e414 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 29 Oct 2024 13:17:09 +0100
-Subject: [PATCH 1/2] net: airoha: Read completion queue data in
- airoha_qdma_tx_napi_poll()
-
-In order to avoid any possible race, read the completion queue head and
-pending entry count in the airoha_qdma_tx_napi_poll routine instead of
-doing it in airoha_irq_handler. Remove the unused airoha_tx_irq_queue
-fields. This is a preliminary patch to add Qdisc offload to airoha_eth.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20241029-airoha-en7581-tx-napi-work-v1-1-96ad1686b946@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 31 +++++++++-------------
- 1 file changed, 13 insertions(+), 18 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -752,11 +752,9 @@ struct airoha_tx_irq_queue {
-       struct airoha_qdma *qdma;
-       struct napi_struct napi;
--      u32 *q;
-       int size;
--      int queued;
--      u16 head;
-+      u32 *q;
- };
- struct airoha_hw_stats {
-@@ -1656,25 +1654,31 @@ static int airoha_qdma_init_rx(struct ai
- static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_tx_irq_queue *irq_q;
-+      int id, done = 0, irq_queued;
-       struct airoha_qdma *qdma;
-       struct airoha_eth *eth;
--      int id, done = 0;
-+      u32 status, head;
-       irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-       qdma = irq_q->qdma;
-       id = irq_q - &qdma->q_tx_irq[0];
-       eth = qdma->eth;
--      while (irq_q->queued > 0 && done < budget) {
--              u32 qid, last, val = irq_q->q[irq_q->head];
-+      status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
-+      head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
-+      head = head % irq_q->size;
-+      irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-+
-+      while (irq_queued > 0 && done < budget) {
-+              u32 qid, last, val = irq_q->q[head];
-               struct airoha_queue *q;
-               if (val == 0xff)
-                       break;
--              irq_q->q[irq_q->head] = 0xff; /* mark as done */
--              irq_q->head = (irq_q->head + 1) % irq_q->size;
--              irq_q->queued--;
-+              irq_q->q[head] = 0xff; /* mark as done */
-+              head = (head + 1) % irq_q->size;
-+              irq_queued--;
-               done++;
-               last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-@@ -2026,20 +2030,11 @@ static irqreturn_t airoha_irq_handler(in
-       if (intr[0] & INT_TX_MASK) {
-               for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
--                      struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
--                      u32 status, head;
--
-                       if (!(intr[0] & TX_DONE_INT_MASK(i)))
-                               continue;
-                       airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
-                                               TX_DONE_INT_MASK(i));
--
--                      status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
--                      head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
--                      irq_q->head = head % irq_q->size;
--                      irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
--
-                       napi_schedule(&qdma->q_tx_irq[i].napi);
-               }
-       }
diff --git a/target/linux/airoha/patches-6.6/031-02-v6.13-net-airoha-Simplify-Tx-napi-logic.patch b/target/linux/airoha/patches-6.6/031-02-v6.13-net-airoha-Simplify-Tx-napi-logic.patch
deleted file mode 100644 (file)
index b35f828..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-From 0c729f53b8c33b9e5eadc2d5e673759e3510501e Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 29 Oct 2024 13:17:10 +0100
-Subject: [PATCH 2/2] net: airoha: Simplify Tx napi logic
-
-Simplify the Tx NAPI logic by relying just on the packet index provided
-by the completion queue, which indicates the completed packet that can
-be removed from the Tx DMA ring.
-This is a preliminary patch to add Qdisc offload to the airoha_eth driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20241029-airoha-en7581-tx-napi-work-v1-2-96ad1686b946@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 73 ++++++++++++----------
- 1 file changed, 41 insertions(+), 32 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1670,8 +1670,12 @@ static int airoha_qdma_tx_napi_poll(stru
-       irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-       while (irq_queued > 0 && done < budget) {
--              u32 qid, last, val = irq_q->q[head];
-+              u32 qid, val = irq_q->q[head];
-+              struct airoha_qdma_desc *desc;
-+              struct airoha_queue_entry *e;
-               struct airoha_queue *q;
-+              u32 index, desc_ctrl;
-+              struct sk_buff *skb;
-               if (val == 0xff)
-                       break;
-@@ -1681,9 +1685,7 @@ static int airoha_qdma_tx_napi_poll(stru
-               irq_queued--;
-               done++;
--              last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-               qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
--
-               if (qid >= ARRAY_SIZE(qdma->q_tx))
-                       continue;
-@@ -1691,46 +1693,53 @@ static int airoha_qdma_tx_napi_poll(stru
-               if (!q->ndesc)
-                       continue;
-+              index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-+              if (index >= q->ndesc)
-+                      continue;
-+
-               spin_lock_bh(&q->lock);
--              while (q->queued > 0) {
--                      struct airoha_qdma_desc *desc = &q->desc[q->tail];
--                      struct airoha_queue_entry *e = &q->entry[q->tail];
--                      u32 desc_ctrl = le32_to_cpu(desc->ctrl);
--                      struct sk_buff *skb = e->skb;
--                      u16 index = q->tail;
--
--                      if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
--                          !(desc_ctrl & QDMA_DESC_DROP_MASK))
--                              break;
-+              if (!q->queued)
-+                      goto unlock;
--                      q->tail = (q->tail + 1) % q->ndesc;
--                      q->queued--;
-+              desc = &q->desc[index];
-+              desc_ctrl = le32_to_cpu(desc->ctrl);
--                      dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
--                                       DMA_TO_DEVICE);
--
--                      WRITE_ONCE(desc->msg0, 0);
--                      WRITE_ONCE(desc->msg1, 0);
-+              if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
-+                  !(desc_ctrl & QDMA_DESC_DROP_MASK))
-+                      goto unlock;
-+
-+              e = &q->entry[index];
-+              skb = e->skb;
-+
-+              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-+                               DMA_TO_DEVICE);
-+              memset(e, 0, sizeof(*e));
-+              WRITE_ONCE(desc->msg0, 0);
-+              WRITE_ONCE(desc->msg1, 0);
-+              q->queued--;
-+
-+              /* completion ring can report out-of-order indexes if hw QoS
-+               * is enabled and packets with different priority are queued
-+               * to same DMA ring. Take into account possible out-of-order
-+               * reports incrementing DMA ring tail pointer
-+               */
-+              while (q->tail != q->head && !q->entry[q->tail].dma_addr)
-+                      q->tail = (q->tail + 1) % q->ndesc;
--                      if (skb) {
--                              u16 queue = skb_get_queue_mapping(skb);
--                              struct netdev_queue *txq;
--
--                              txq = netdev_get_tx_queue(skb->dev, queue);
--                              netdev_tx_completed_queue(txq, 1, skb->len);
--                              if (netif_tx_queue_stopped(txq) &&
--                                  q->ndesc - q->queued >= q->free_thr)
--                                      netif_tx_wake_queue(txq);
--
--                              dev_kfree_skb_any(skb);
--                              e->skb = NULL;
--                      }
-+              if (skb) {
-+                      u16 queue = skb_get_queue_mapping(skb);
-+                      struct netdev_queue *txq;
-+
-+                      txq = netdev_get_tx_queue(skb->dev, queue);
-+                      netdev_tx_completed_queue(txq, 1, skb->len);
-+                      if (netif_tx_queue_stopped(txq) &&
-+                          q->ndesc - q->queued >= q->free_thr)
-+                              netif_tx_wake_queue(txq);
--                      if (index == last)
--                              break;
-+                      dev_kfree_skb_any(skb);
-               }
--
-+unlock:
-               spin_unlock_bh(&q->lock);
-       }
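The rework above allows the completion ring to report indexes out of order when
hardware QoS interleaves packets of different priority on the same DMA ring: each
completed slot is cleared in place, and the tail only advances across a contiguous
run of already-cleared slots. A standalone sketch of that reclaim step (simplified
types, no locking, not the driver's actual structures):

struct tx_slot {
	unsigned long long dma_addr; /* cleared to 0 once the slot completes */
};

struct tx_ring {
	struct tx_slot *entry;
	unsigned int ndesc, head, tail;
};

static void tx_complete_slot(struct tx_ring *q, unsigned int index)
{
	q->entry[index].dma_addr = 0; /* completions may arrive out of order */

	/* Reclaim only a contiguous run of completed slots from the tail. */
	while (q->tail != q->head && !q->entry[q->tail].dma_addr)
		q->tail = (q->tail + 1) % q->ndesc;
}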
diff --git a/target/linux/airoha/patches-6.6/032-v6.13-watchdog-Add-support-for-Airoha-EN7851-watchdog.patch b/target/linux/airoha/patches-6.6/032-v6.13-watchdog-Add-support-for-Airoha-EN7851-watchdog.patch
deleted file mode 100644 (file)
index 02dbadf..0000000
+++ /dev/null
@@ -1,267 +0,0 @@
-From 3cf67f3769b8227ca75ca7102180a2e270ee01aa Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Fri, 11 Oct 2024 12:43:53 +0200
-Subject: [PATCH] watchdog: Add support for Airoha EN7851 watchdog
-
-Add support for the Airoha EN7851 watchdog. This is a very basic watchdog
-with no pretimeout support, a maximum timeout of 28 seconds, and a tick
-rate of half the SoC bus clock.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Reviewed-by: Guenter Roeck <linux@roeck-us.net>
-Link: https://lore.kernel.org/r/20241011104411.28659-2-ansuelsmth@gmail.com
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
-Signed-off-by: Wim Van Sebroeck <wim@linux-watchdog.org>
----
- drivers/watchdog/Kconfig      |   8 ++
- drivers/watchdog/Makefile     |   1 +
- drivers/watchdog/airoha_wdt.c | 216 ++++++++++++++++++++++++++++++++++
- 3 files changed, 225 insertions(+)
- create mode 100644 drivers/watchdog/airoha_wdt.c
-
---- a/drivers/watchdog/Kconfig
-+++ b/drivers/watchdog/Kconfig
-@@ -372,6 +372,14 @@ config SL28CPLD_WATCHDOG
- # ARM Architecture
-+config AIROHA_WATCHDOG
-+      tristate "Airoha EN7581 Watchdog"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      select WATCHDOG_CORE
-+      help
-+        Watchdog timer embedded into Airoha SoC. This will reboot your
-+        system when the timeout is reached.
-+
- config ARM_SP805_WATCHDOG
-       tristate "ARM SP805 Watchdog"
-       depends on (ARM || ARM64 || COMPILE_TEST) && ARM_AMBA
---- a/drivers/watchdog/Makefile
-+++ b/drivers/watchdog/Makefile
-@@ -40,6 +40,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.
- obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o
- obj-$(CONFIG_ARM_SBSA_WATCHDOG) += sbsa_gwdt.o
- obj-$(CONFIG_ARMADA_37XX_WATCHDOG) += armada_37xx_wdt.o
-+obj-$(CONFIG_AIROHA_WATCHDOG) += airoha_wdt.o
- obj-$(CONFIG_ASM9260_WATCHDOG) += asm9260_wdt.o
- obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
- obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
---- /dev/null
-+++ b/drivers/watchdog/airoha_wdt.c
-@@ -0,0 +1,216 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ *    Airoha Watchdog Driver
-+ *
-+ *    Copyright (c) 2024, AIROHA  All rights reserved.
-+ *
-+ *    Mayur Kumar <mayur.kumar@airoha.com>
-+ *    Christian Marangi <ansuelsmth@gmail.com>
-+ *
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/types.h>
-+#include <linux/bitfield.h>
-+#include <linux/clk.h>
-+#include <linux/io.h>
-+#include <linux/math.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/watchdog.h>
-+
-+/* Base address of timer and watchdog registers */
-+#define TIMER_CTRL                    0x0
-+#define   WDT_ENABLE                  BIT(25)
-+#define   WDT_TIMER_INTERRUPT         BIT(21)
-+/* Timer3 is used as Watchdog Timer */
-+#define   WDT_TIMER_ENABLE            BIT(5)
-+#define WDT_TIMER_LOAD_VALUE          0x2c
-+#define WDT_TIMER_CUR_VALUE           0x30
-+#define  WDT_TIMER_VAL                        GENMASK(31, 0)
-+#define WDT_RELOAD                    0x38
-+#define   WDT_RLD                     BIT(0)
-+
-+/* Airoha watchdog structure description */
-+struct airoha_wdt_desc {
-+      struct watchdog_device wdog_dev;
-+      unsigned int wdt_freq;
-+      void __iomem *base;
-+};
-+
-+#define WDT_HEARTBEAT                 24
-+static int heartbeat = WDT_HEARTBEAT;
-+module_param(heartbeat, int, 0);
-+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. (default="
-+               __MODULE_STRING(WDT_HEARTBEAT) ")");
-+
-+static bool nowayout = WATCHDOG_NOWAYOUT;
-+module_param(nowayout, bool, 0);
-+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
-+               __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-+
-+static int airoha_wdt_start(struct watchdog_device *wdog_dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = watchdog_get_drvdata(wdog_dev);
-+      u32 val;
-+
-+      val = readl(airoha_wdt->base + TIMER_CTRL);
-+      val |= (WDT_TIMER_ENABLE | WDT_ENABLE | WDT_TIMER_INTERRUPT);
-+      writel(val, airoha_wdt->base + TIMER_CTRL);
-+      val = wdog_dev->timeout * airoha_wdt->wdt_freq;
-+      writel(val, airoha_wdt->base + WDT_TIMER_LOAD_VALUE);
-+
-+      return 0;
-+}
-+
-+static int airoha_wdt_stop(struct watchdog_device *wdog_dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = watchdog_get_drvdata(wdog_dev);
-+      u32 val;
-+
-+      val = readl(airoha_wdt->base + TIMER_CTRL);
-+      val &= (~WDT_ENABLE & ~WDT_TIMER_ENABLE);
-+      writel(val, airoha_wdt->base + TIMER_CTRL);
-+
-+      return 0;
-+}
-+
-+static int airoha_wdt_ping(struct watchdog_device *wdog_dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = watchdog_get_drvdata(wdog_dev);
-+      u32 val;
-+
-+      val = readl(airoha_wdt->base + WDT_RELOAD);
-+      val |= WDT_RLD;
-+      writel(val, airoha_wdt->base + WDT_RELOAD);
-+
-+      return 0;
-+}
-+
-+static int airoha_wdt_set_timeout(struct watchdog_device *wdog_dev, unsigned int timeout)
-+{
-+      wdog_dev->timeout = timeout;
-+
-+      if (watchdog_active(wdog_dev)) {
-+              airoha_wdt_stop(wdog_dev);
-+              return airoha_wdt_start(wdog_dev);
-+      }
-+
-+      return 0;
-+}
-+
-+static unsigned int airoha_wdt_get_timeleft(struct watchdog_device *wdog_dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = watchdog_get_drvdata(wdog_dev);
-+      u32 val;
-+
-+      val = readl(airoha_wdt->base + WDT_TIMER_CUR_VALUE);
-+      return DIV_ROUND_UP(val, airoha_wdt->wdt_freq);
-+}
-+
-+static const struct watchdog_info airoha_wdt_info = {
-+      .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
-+      .identity = "Airoha Watchdog",
-+};
-+
-+static const struct watchdog_ops airoha_wdt_ops = {
-+      .owner = THIS_MODULE,
-+      .start = airoha_wdt_start,
-+      .stop = airoha_wdt_stop,
-+      .ping = airoha_wdt_ping,
-+      .set_timeout = airoha_wdt_set_timeout,
-+      .get_timeleft = airoha_wdt_get_timeleft,
-+};
-+
-+static int airoha_wdt_probe(struct platform_device *pdev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt;
-+      struct watchdog_device *wdog_dev;
-+      struct device *dev = &pdev->dev;
-+      struct clk *bus_clk;
-+      int ret;
-+
-+      airoha_wdt = devm_kzalloc(dev, sizeof(*airoha_wdt), GFP_KERNEL);
-+      if (!airoha_wdt)
-+              return -ENOMEM;
-+
-+      airoha_wdt->base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(airoha_wdt->base))
-+              return PTR_ERR(airoha_wdt->base);
-+
-+      bus_clk = devm_clk_get_enabled(dev, "bus");
-+      if (IS_ERR(bus_clk))
-+              return dev_err_probe(dev, PTR_ERR(bus_clk),
-+                                   "failed to enable bus clock\n");
-+
-+      /* Watchdog ticks at half the bus rate */
-+      airoha_wdt->wdt_freq = clk_get_rate(bus_clk) / 2;
-+
-+      /* Initialize struct watchdog device */
-+      wdog_dev = &airoha_wdt->wdog_dev;
-+      wdog_dev->timeout = heartbeat;
-+      wdog_dev->info = &airoha_wdt_info;
-+      wdog_dev->ops = &airoha_wdt_ops;
-+      /* Bus 300MHz, watchdog 150MHz, 28 seconds */
-+      wdog_dev->max_timeout = FIELD_MAX(WDT_TIMER_VAL) / airoha_wdt->wdt_freq;
-+      wdog_dev->parent = dev;
-+
-+      watchdog_set_drvdata(wdog_dev, airoha_wdt);
-+      watchdog_set_nowayout(wdog_dev, nowayout);
-+      watchdog_stop_on_unregister(wdog_dev);
-+
-+      ret = devm_watchdog_register_device(dev, wdog_dev);
-+      if (ret)
-+              return ret;
-+
-+      platform_set_drvdata(pdev, airoha_wdt);
-+      return 0;
-+}
-+
-+static int airoha_wdt_suspend(struct device *dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = dev_get_drvdata(dev);
-+
-+      if (watchdog_active(&airoha_wdt->wdog_dev))
-+              airoha_wdt_stop(&airoha_wdt->wdog_dev);
-+
-+      return 0;
-+}
-+
-+static int airoha_wdt_resume(struct device *dev)
-+{
-+      struct airoha_wdt_desc *airoha_wdt = dev_get_drvdata(dev);
-+
-+      if (watchdog_active(&airoha_wdt->wdog_dev)) {
-+              airoha_wdt_start(&airoha_wdt->wdog_dev);
-+              airoha_wdt_ping(&airoha_wdt->wdog_dev);
-+      }
-+      return 0;
-+}
-+
-+static const struct of_device_id airoha_wdt_of_match[] = {
-+      { .compatible = "airoha,en7581-wdt", },
-+      { },
-+};
-+
-+MODULE_DEVICE_TABLE(of, airoha_wdt_of_match);
-+
-+static DEFINE_SIMPLE_DEV_PM_OPS(airoha_wdt_pm_ops, airoha_wdt_suspend, airoha_wdt_resume);
-+
-+static struct platform_driver airoha_wdt_driver = {
-+      .probe = airoha_wdt_probe,
-+      .driver = {
-+              .name = "airoha-wdt",
-+              .pm = pm_sleep_ptr(&airoha_wdt_pm_ops),
-+              .of_match_table = airoha_wdt_of_match,
-+      },
-+};
-+
-+module_platform_driver(airoha_wdt_driver);
-+
-+MODULE_AUTHOR("Mayur Kumar <mayur.kumar@airoha.com>");
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("Airoha EN7581 Watchdog Driver");
-+MODULE_LICENSE("GPL");
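The timeout arithmetic in airoha_wdt_probe() and airoha_wdt_start() above is easy to verify by hand. A minimal standalone sketch, assuming the 300 MHz bus clock mentioned in the in-code comment and a 32-bit WDT_TIMER_VAL field (the field width is defined earlier in the driver and is an assumption here):

/* Hypothetical standalone check of the driver's timeout math. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bus_hz = 300000000;         /* assumed bus clock, per the driver comment */
	uint64_t wdt_freq = bus_hz / 2;      /* watchdog ticks at half the bus rate */
	uint64_t field_max = 0xffffffffULL;  /* assuming WDT_TIMER_VAL is a 32-bit field */

	/* max_timeout as computed in airoha_wdt_probe(): ~28 s */
	printf("max_timeout = %llu s\n", (unsigned long long)(field_max / wdt_freq));

	/* load value written by airoha_wdt_start() for a 10 s timeout: 1.5e9 ticks */
	printf("load value  = %llu ticks\n", (unsigned long long)(10 * wdt_freq));

	return 0;
}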
diff --git a/target/linux/airoha/patches-6.6/033-01-v6.13-clk-en7523-remove-REG_PCIE-_-MEM-MEM_MASK-configurat.patch b/target/linux/airoha/patches-6.6/033-01-v6.13-clk-en7523-remove-REG_PCIE-_-MEM-MEM_MASK-configurat.patch
deleted file mode 100644 (file)
index 59aefbb..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-From c31d1cdd7bff1d2c13d435bb9d0c76bfaa332097 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:49 +0100
-Subject: [PATCH 1/6] clk: en7523: remove REG_PCIE*_{MEM,MEM_MASK}
- configuration
-
-REG_PCIE*_MEM and REG_PCIE*_MEM_MASK regs (PBUS_CSR memory region) are not
-part of the scu block on the EN7581 SoC and they are used to select the
-PCIE ports on the PBUS, so remove this configuration from the clock driver
-and set these registers in the PCIE host driver instead.
-This patch does not introduce any backward incompatibility since the dts
-for EN7581 SoC is not upstream yet.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-2-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 18 ------------------
- 1 file changed, 18 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -31,12 +31,6 @@
- #define   REG_RESET_CONTROL_PCIE1     BIT(27)
- #define   REG_RESET_CONTROL_PCIE2     BIT(26)
- /* EN7581 */
--#define REG_PCIE0_MEM                 0x00
--#define REG_PCIE0_MEM_MASK            0x04
--#define REG_PCIE1_MEM                 0x08
--#define REG_PCIE1_MEM_MASK            0x0c
--#define REG_PCIE2_MEM                 0x10
--#define REG_PCIE2_MEM_MASK            0x14
- #define REG_NP_SCU_PCIC                       0x88
- #define REG_NP_SCU_SSTR                       0x9c
- #define REG_PCIE_XSI0_SEL_MASK                GENMASK(14, 13)
-@@ -415,26 +409,14 @@ static void en7581_pci_disable(struct cl
- static int en7581_clk_hw_init(struct platform_device *pdev,
-                             void __iomem *np_base)
- {
--      void __iomem *pb_base;
-       u32 val;
--      pb_base = devm_platform_ioremap_resource(pdev, 3);
--      if (IS_ERR(pb_base))
--              return PTR_ERR(pb_base);
--
-       val = readl(np_base + REG_NP_SCU_SSTR);
-       val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
-       writel(val, np_base + REG_NP_SCU_SSTR);
-       val = readl(np_base + REG_NP_SCU_PCIC);
-       writel(val | 3, np_base + REG_NP_SCU_PCIC);
--      writel(0x20000000, pb_base + REG_PCIE0_MEM);
--      writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
--      writel(0x24000000, pb_base + REG_PCIE1_MEM);
--      writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
--      writel(0x28000000, pb_base + REG_PCIE2_MEM);
--      writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
--
-       return 0;
- }
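For context, the writes deleted above program the PBUS memory windows of the three PCIe ports; per the commit message they now belong in the PCIe host driver. A minimal sketch of that setup, using the offsets and values verbatim from the removed hunk (the function name is invented, <linux/io.h> is assumed, and the real host-driver change is not part of this diff):

/* Hypothetical sketch: PBUS_CSR window setup moved out of the clock driver. */
static void en7581_pcie_setup_pbus_windows(void __iomem *pb_base)
{
	writel(0x20000000, pb_base + 0x00); /* REG_PCIE0_MEM */
	writel(0xfc000000, pb_base + 0x04); /* REG_PCIE0_MEM_MASK */
	writel(0x24000000, pb_base + 0x08); /* REG_PCIE1_MEM */
	writel(0xfc000000, pb_base + 0x0c); /* REG_PCIE1_MEM_MASK */
	writel(0x28000000, pb_base + 0x10); /* REG_PCIE2_MEM */
	writel(0xfc000000, pb_base + 0x14); /* REG_PCIE2_MEM_MASK */
}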
diff --git a/target/linux/airoha/patches-6.6/033-02-v6.13-clk-en7523-move-clock_register-in-hw_init-callback.patch b/target/linux/airoha/patches-6.6/033-02-v6.13-clk-en7523-move-clock_register-in-hw_init-callback.patch
deleted file mode 100644 (file)
index 6a76886..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-From b8bdfc666bc5f58caf46e67b615132fccbaca3d4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:50 +0100
-Subject: [PATCH 2/6] clk: en7523: move clock_register in hw_init callback
-
-Move the en7523_register_clocks routine into the hw_init callback.
-Introduce an en7523_clk_hw_init callback for the EN7523 SoC.
-This is a preliminary patch to differentiate the IO-mapped regions of the
-EN7523 and EN7581 SoCs, so that the chip-scu IO region <0x1fa20000 0x384>
-on the EN7581 SoC can be accessed as a syscon device, since it contains
-miscellaneous registers needed by multiple devices (clock, pinctrl, ...).
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-3-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 82 ++++++++++++++++++++++++----------------
- 1 file changed, 50 insertions(+), 32 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -78,7 +78,8 @@ struct en_clk_soc_data {
-               const u16 *idx_map;
-               u16 idx_map_nr;
-       } reset;
--      int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
-+      int (*hw_init)(struct platform_device *pdev,
-+                     struct clk_hw_onecell_data *clk_data);
- };
- static const u32 gsw_base[] = { 400000000, 500000000 };
-@@ -406,20 +407,6 @@ static void en7581_pci_disable(struct cl
-       usleep_range(1000, 2000);
- }
--static int en7581_clk_hw_init(struct platform_device *pdev,
--                            void __iomem *np_base)
--{
--      u32 val;
--
--      val = readl(np_base + REG_NP_SCU_SSTR);
--      val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
--      writel(val, np_base + REG_NP_SCU_SSTR);
--      val = readl(np_base + REG_NP_SCU_PCIC);
--      writel(val | 3, np_base + REG_NP_SCU_PCIC);
--
--      return 0;
--}
--
- static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
-                                  void __iomem *base, void __iomem *np_base)
- {
-@@ -449,6 +436,49 @@ static void en7523_register_clocks(struc
-       clk_data->hws[EN7523_CLK_PCIE] = hw;
- }
-+static int en7523_clk_hw_init(struct platform_device *pdev,
-+                            struct clk_hw_onecell_data *clk_data)
-+{
-+      void __iomem *base, *np_base;
-+
-+      base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      np_base = devm_platform_ioremap_resource(pdev, 1);
-+      if (IS_ERR(np_base))
-+              return PTR_ERR(np_base);
-+
-+      en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-+
-+      return 0;
-+}
-+
-+static int en7581_clk_hw_init(struct platform_device *pdev,
-+                            struct clk_hw_onecell_data *clk_data)
-+{
-+      void __iomem *base, *np_base;
-+      u32 val;
-+
-+      base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      np_base = devm_platform_ioremap_resource(pdev, 1);
-+      if (IS_ERR(np_base))
-+              return PTR_ERR(np_base);
-+
-+      en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-+
-+      val = readl(np_base + REG_NP_SCU_SSTR);
-+      val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
-+      writel(val, np_base + REG_NP_SCU_SSTR);
-+      val = readl(np_base + REG_NP_SCU_PCIC);
-+      writel(val | 3, np_base + REG_NP_SCU_PCIC);
-+
-+      return 0;
-+}
-+
- static int en7523_reset_update(struct reset_controller_dev *rcdev,
-                              unsigned long id, bool assert)
- {
-@@ -543,31 +573,18 @@ static int en7523_clk_probe(struct platf
-       struct device_node *node = pdev->dev.of_node;
-       const struct en_clk_soc_data *soc_data;
-       struct clk_hw_onecell_data *clk_data;
--      void __iomem *base, *np_base;
-       int r;
--      base = devm_platform_ioremap_resource(pdev, 0);
--      if (IS_ERR(base))
--              return PTR_ERR(base);
--
--      np_base = devm_platform_ioremap_resource(pdev, 1);
--      if (IS_ERR(np_base))
--              return PTR_ERR(np_base);
--
--      soc_data = device_get_match_data(&pdev->dev);
--      if (soc_data->hw_init) {
--              r = soc_data->hw_init(pdev, np_base);
--              if (r)
--                      return r;
--      }
--
-       clk_data = devm_kzalloc(&pdev->dev,
-                               struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
-                               GFP_KERNEL);
-       if (!clk_data)
-               return -ENOMEM;
--      en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-+      soc_data = device_get_match_data(&pdev->dev);
-+      r = soc_data->hw_init(pdev, clk_data);
-+      if (r)
-+              return r;
-       r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-       if (r)
-@@ -590,6 +607,7 @@ static const struct en_clk_soc_data en75
-               .prepare = en7523_pci_prepare,
-               .unprepare = en7523_pci_unprepare,
-       },
-+      .hw_init = en7523_clk_hw_init,
- };
- static const struct en_clk_soc_data en7581_data = {
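The value of this refactor is that each SoC now owns its resource mapping. A hypothetical sketch of what adding a further SoC would look like under the new hw_init callback scheme (the "en9999" names are invented for illustration):

/* Hypothetical sketch: an additional SoC under the per-SoC hw_init scheme. */
static int en9999_clk_hw_init(struct platform_device *pdev,
			      struct clk_hw_onecell_data *clk_data)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/* register this SoC's clocks into clk_data here */
	return 0;
}

static const struct en_clk_soc_data en9999_data = {
	.hw_init = en9999_clk_hw_init,
};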
diff --git a/target/linux/airoha/patches-6.6/033-03-v6.13-clk-en7523-introduce-chip_scu-regmap.patch b/target/linux/airoha/patches-6.6/033-03-v6.13-clk-en7523-introduce-chip_scu-regmap.patch
deleted file mode 100644 (file)
index 3196627..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-From f72fc22038dd544fa4d39c06e8c81c09c0041ed4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:51 +0100
-Subject: [PATCH 3/6] clk: en7523: introduce chip_scu regmap
-
-Introduce chip_scu regmap pointer since EN7581 SoC will access chip-scu
-memory area via a syscon node. Remove first memory region mapping
-for EN7581 SoC. This patch does not introduce any backward incompatibility
-since the dts for EN7581 SoC is not upstream yet.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-4-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 81 ++++++++++++++++++++++++++++++----------
- 1 file changed, 61 insertions(+), 20 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -3,8 +3,10 @@
- #include <linux/delay.h>
- #include <linux/clk-provider.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
- #include <linux/platform_device.h>
- #include <linux/property.h>
-+#include <linux/regmap.h>
- #include <linux/reset-controller.h>
- #include <dt-bindings/clock/en7523-clk.h>
- #include <dt-bindings/reset/airoha,en7581-reset.h>
-@@ -247,15 +249,11 @@ static const u16 en7581_rst_map[] = {
-       [EN7581_XPON_MAC_RST]           = RST_NR_PER_BANK + 31,
- };
--static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
-+static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
- {
--      const struct en_clk_desc *desc = &en7523_base_clks[i];
--      u32 val;
--
-       if (!desc->base_bits)
-               return desc->base_value;
--      val = readl(base + desc->base_reg);
-       val >>= desc->base_shift;
-       val &= (1 << desc->base_bits) - 1;
-@@ -265,16 +263,11 @@ static unsigned int en7523_get_base_rate
-       return desc->base_values[val];
- }
--static u32 en7523_get_div(void __iomem *base, int i)
-+static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val)
- {
--      const struct en_clk_desc *desc = &en7523_base_clks[i];
--      u32 reg, val;
--
-       if (!desc->div_bits)
-               return 1;
--      reg = desc->div_reg ? desc->div_reg : desc->base_reg;
--      val = readl(base + reg);
-       val >>= desc->div_shift;
-       val &= (1 << desc->div_bits) - 1;
-@@ -418,9 +411,12 @@ static void en7523_register_clocks(struc
-       for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
-               const struct en_clk_desc *desc = &en7523_base_clks[i];
-+              u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
-+              u32 val = readl(base + desc->base_reg);
--              rate = en7523_get_base_rate(base, i);
--              rate /= en7523_get_div(base, i);
-+              rate = en7523_get_base_rate(desc, val);
-+              val = readl(base + reg);
-+              rate /= en7523_get_div(desc, val);
-               hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
-               if (IS_ERR(hw)) {
-@@ -454,21 +450,66 @@ static int en7523_clk_hw_init(struct pla
-       return 0;
- }
-+static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
-+                                 struct regmap *map, void __iomem *base)
-+{
-+      struct clk_hw *hw;
-+      u32 rate;
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
-+              const struct en_clk_desc *desc = &en7523_base_clks[i];
-+              u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
-+              int err;
-+
-+              err = regmap_read(map, desc->base_reg, &val);
-+              if (err) {
-+                      pr_err("Failed reading fixed clk rate %s: %d\n",
-+                             desc->name, err);
-+                      continue;
-+              }
-+              rate = en7523_get_base_rate(desc, val);
-+
-+              err = regmap_read(map, reg, &val);
-+              if (err) {
-+                      pr_err("Failed reading fixed clk div %s: %d\n",
-+                             desc->name, err);
-+                      continue;
-+              }
-+              rate /= en7523_get_div(desc, val);
-+
-+              hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
-+              if (IS_ERR(hw)) {
-+                      pr_err("Failed to register clk %s: %ld\n",
-+                             desc->name, PTR_ERR(hw));
-+                      continue;
-+              }
-+
-+              clk_data->hws[desc->id] = hw;
-+      }
-+
-+      hw = en7523_register_pcie_clk(dev, base);
-+      clk_data->hws[EN7523_CLK_PCIE] = hw;
-+
-+      clk_data->num = EN7523_NUM_CLOCKS;
-+}
-+
- static int en7581_clk_hw_init(struct platform_device *pdev,
-                             struct clk_hw_onecell_data *clk_data)
- {
--      void __iomem *base, *np_base;
-+      void __iomem *np_base;
-+      struct regmap *map;
-       u32 val;
--      base = devm_platform_ioremap_resource(pdev, 0);
--      if (IS_ERR(base))
--              return PTR_ERR(base);
-+      map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
-+      if (IS_ERR(map))
-+              return PTR_ERR(map);
--      np_base = devm_platform_ioremap_resource(pdev, 1);
-+      np_base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(np_base))
-               return PTR_ERR(np_base);
--      en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-+      en7581_register_clocks(&pdev->dev, clk_data, map, np_base);
-       val = readl(np_base + REG_NP_SCU_SSTR);
-       val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
-@@ -545,7 +586,7 @@ static int en7523_reset_register(struct
-       if (!soc_data->reset.idx_map_nr)
-               return 0;
--      base = devm_platform_ioremap_resource(pdev, 2);
-+      base = devm_platform_ioremap_resource(pdev, 1);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
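Since the chip-scu region is now exposed as a syscon, other drivers can share it without mapping the registers a second time. A minimal sketch of a second consumer, assuming only the "airoha,en7581-chip-scu" compatible from the patch above (the probe function and register offset are invented for illustration):

/* Hypothetical sketch: a second driver sharing the chip-scu registers via
 * the syscon regmap instead of remapping the region.
 */
static int another_chip_scu_user_probe(struct platform_device *pdev)
{
	struct regmap *chip_scu;
	u32 val;

	chip_scu = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
	if (IS_ERR(chip_scu))
		return PTR_ERR(chip_scu);

	return regmap_read(chip_scu, 0x0, &val);
}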
diff --git a/target/linux/airoha/patches-6.6/033-04-v6.13-clk-en7523-fix-estimation-of-fixed-rate-for-EN7581.patch b/target/linux/airoha/patches-6.6/033-04-v6.13-clk-en7523-fix-estimation-of-fixed-rate-for-EN7581.patch
deleted file mode 100644 (file)
index 79c9ab2..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-From f98eded9e9ab048c88ff59c5523e703a6ced5523 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:52 +0100
-Subject: [PATCH 4/6] clk: en7523: fix estimation of fixed rate for EN7581
-
-Introduce the en7581_base_clks array in order to define per-SoC fixed-rate
-clock parameters, and fix the wrong parameters for the emi, npu and crypto
-EN7581 clocks.
-
-Fixes: 66bc47326ce2 ("clk: en7523: Add EN7581 support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-5-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 105 ++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 103 insertions(+), 2 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -37,6 +37,7 @@
- #define REG_NP_SCU_SSTR                       0x9c
- #define REG_PCIE_XSI0_SEL_MASK                GENMASK(14, 13)
- #define REG_PCIE_XSI1_SEL_MASK                GENMASK(12, 11)
-+#define REG_CRYPTO_CLKSRC2            0x20c
- #define REG_RST_CTRL2                 0x00
- #define REG_RST_CTRL1                 0x04
-@@ -89,6 +90,10 @@ static const u32 emi_base[] = { 33300000
- static const u32 bus_base[] = { 500000000, 540000000 };
- static const u32 slic_base[] = { 100000000, 3125000 };
- static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
-+/* EN7581 */
-+static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
-+static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
-+static const u32 crypto_base[] = { 540000000, 480000000 };
- static const struct en_clk_desc en7523_base_clks[] = {
-       {
-@@ -186,6 +191,102 @@ static const struct en_clk_desc en7523_b
-       }
- };
-+static const struct en_clk_desc en7581_base_clks[] = {
-+      {
-+              .id = EN7523_CLK_GSW,
-+              .name = "gsw",
-+
-+              .base_reg = REG_GSW_CLK_DIV_SEL,
-+              .base_bits = 1,
-+              .base_shift = 8,
-+              .base_values = gsw_base,
-+              .n_base_values = ARRAY_SIZE(gsw_base),
-+
-+              .div_bits = 3,
-+              .div_shift = 0,
-+              .div_step = 1,
-+              .div_offset = 1,
-+      }, {
-+              .id = EN7523_CLK_EMI,
-+              .name = "emi",
-+
-+              .base_reg = REG_EMI_CLK_DIV_SEL,
-+              .base_bits = 2,
-+              .base_shift = 8,
-+              .base_values = emi7581_base,
-+              .n_base_values = ARRAY_SIZE(emi7581_base),
-+
-+              .div_bits = 3,
-+              .div_shift = 0,
-+              .div_step = 1,
-+              .div_offset = 1,
-+      }, {
-+              .id = EN7523_CLK_BUS,
-+              .name = "bus",
-+
-+              .base_reg = REG_BUS_CLK_DIV_SEL,
-+              .base_bits = 1,
-+              .base_shift = 8,
-+              .base_values = bus_base,
-+              .n_base_values = ARRAY_SIZE(bus_base),
-+
-+              .div_bits = 3,
-+              .div_shift = 0,
-+              .div_step = 1,
-+              .div_offset = 1,
-+      }, {
-+              .id = EN7523_CLK_SLIC,
-+              .name = "slic",
-+
-+              .base_reg = REG_SPI_CLK_FREQ_SEL,
-+              .base_bits = 1,
-+              .base_shift = 0,
-+              .base_values = slic_base,
-+              .n_base_values = ARRAY_SIZE(slic_base),
-+
-+              .div_reg = REG_SPI_CLK_DIV_SEL,
-+              .div_bits = 5,
-+              .div_shift = 24,
-+              .div_val0 = 20,
-+              .div_step = 2,
-+      }, {
-+              .id = EN7523_CLK_SPI,
-+              .name = "spi",
-+
-+              .base_reg = REG_SPI_CLK_DIV_SEL,
-+
-+              .base_value = 400000000,
-+
-+              .div_bits = 5,
-+              .div_shift = 8,
-+              .div_val0 = 40,
-+              .div_step = 2,
-+      }, {
-+              .id = EN7523_CLK_NPU,
-+              .name = "npu",
-+
-+              .base_reg = REG_NPU_CLK_DIV_SEL,
-+              .base_bits = 2,
-+              .base_shift = 8,
-+              .base_values = npu7581_base,
-+              .n_base_values = ARRAY_SIZE(npu7581_base),
-+
-+              .div_bits = 3,
-+              .div_shift = 0,
-+              .div_step = 1,
-+              .div_offset = 1,
-+      }, {
-+              .id = EN7523_CLK_CRYPTO,
-+              .name = "crypto",
-+
-+              .base_reg = REG_CRYPTO_CLKSRC2,
-+              .base_bits = 1,
-+              .base_shift = 0,
-+              .base_values = crypto_base,
-+              .n_base_values = ARRAY_SIZE(crypto_base),
-+      }
-+};
-+
- static const u16 en7581_rst_ofs[] = {
-       REG_RST_CTRL2,
-       REG_RST_CTRL1,
-@@ -457,8 +558,8 @@ static void en7581_register_clocks(struc
-       u32 rate;
-       int i;
--      for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
--              const struct en_clk_desc *desc = &en7523_base_clks[i];
-+      for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) {
-+              const struct en_clk_desc *desc = &en7581_base_clks[i];
-               u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
-               int err;
diff --git a/target/linux/airoha/patches-6.6/033-05-v6.13-clk-en7523-move-en7581_reset_register-in-en7581_clk_.patch b/target/linux/airoha/patches-6.6/033-05-v6.13-clk-en7523-move-en7581_reset_register-in-en7581_clk_.patch
deleted file mode 100644 (file)
index 36b9d9f..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-From 82e6bf912d5846646892becea659b39d178d79e3 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:53 +0100
-Subject: [PATCH 5/6] clk: en7523: move en7581_reset_register() in
- en7581_clk_hw_init()
-
-Move the en7581_reset_register routine into en7581_clk_hw_init(), since the
-reset feature is only supported by the EN7581 SoC.
-Get rid of the reset struct in the en_clk_soc_data data struct.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-6-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 93 ++++++++++++++--------------------------
- 1 file changed, 33 insertions(+), 60 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -76,11 +76,6 @@ struct en_rst_data {
- struct en_clk_soc_data {
-       const struct clk_ops pcie_ops;
--      struct {
--              const u16 *bank_ofs;
--              const u16 *idx_map;
--              u16 idx_map_nr;
--      } reset;
-       int (*hw_init)(struct platform_device *pdev,
-                      struct clk_hw_onecell_data *clk_data);
- };
-@@ -595,32 +590,6 @@ static void en7581_register_clocks(struc
-       clk_data->num = EN7523_NUM_CLOCKS;
- }
--static int en7581_clk_hw_init(struct platform_device *pdev,
--                            struct clk_hw_onecell_data *clk_data)
--{
--      void __iomem *np_base;
--      struct regmap *map;
--      u32 val;
--
--      map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
--      if (IS_ERR(map))
--              return PTR_ERR(map);
--
--      np_base = devm_platform_ioremap_resource(pdev, 0);
--      if (IS_ERR(np_base))
--              return PTR_ERR(np_base);
--
--      en7581_register_clocks(&pdev->dev, clk_data, map, np_base);
--
--      val = readl(np_base + REG_NP_SCU_SSTR);
--      val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
--      writel(val, np_base + REG_NP_SCU_SSTR);
--      val = readl(np_base + REG_NP_SCU_PCIC);
--      writel(val | 3, np_base + REG_NP_SCU_PCIC);
--
--      return 0;
--}
--
- static int en7523_reset_update(struct reset_controller_dev *rcdev,
-                              unsigned long id, bool assert)
- {
-@@ -670,23 +639,18 @@ static int en7523_reset_xlate(struct res
-       return rst_data->idx_map[reset_spec->args[0]];
- }
--static const struct reset_control_ops en7523_reset_ops = {
-+static const struct reset_control_ops en7581_reset_ops = {
-       .assert = en7523_reset_assert,
-       .deassert = en7523_reset_deassert,
-       .status = en7523_reset_status,
- };
--static int en7523_reset_register(struct platform_device *pdev,
--                               const struct en_clk_soc_data *soc_data)
-+static int en7581_reset_register(struct platform_device *pdev)
- {
-       struct device *dev = &pdev->dev;
-       struct en_rst_data *rst_data;
-       void __iomem *base;
--      /* no reset lines available */
--      if (!soc_data->reset.idx_map_nr)
--              return 0;
--
-       base = devm_platform_ioremap_resource(pdev, 1);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
-@@ -695,13 +659,13 @@ static int en7523_reset_register(struct
-       if (!rst_data)
-               return -ENOMEM;
--      rst_data->bank_ofs = soc_data->reset.bank_ofs;
--      rst_data->idx_map = soc_data->reset.idx_map;
-+      rst_data->bank_ofs = en7581_rst_ofs;
-+      rst_data->idx_map = en7581_rst_map;
-       rst_data->base = base;
--      rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr;
-+      rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map);
-       rst_data->rcdev.of_xlate = en7523_reset_xlate;
--      rst_data->rcdev.ops = &en7523_reset_ops;
-+      rst_data->rcdev.ops = &en7581_reset_ops;
-       rst_data->rcdev.of_node = dev->of_node;
-       rst_data->rcdev.of_reset_n_cells = 1;
-       rst_data->rcdev.owner = THIS_MODULE;
-@@ -710,6 +674,32 @@ static int en7523_reset_register(struct
-       return devm_reset_controller_register(dev, &rst_data->rcdev);
- }
-+static int en7581_clk_hw_init(struct platform_device *pdev,
-+                            struct clk_hw_onecell_data *clk_data)
-+{
-+      void __iomem *np_base;
-+      struct regmap *map;
-+      u32 val;
-+
-+      map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
-+      if (IS_ERR(map))
-+              return PTR_ERR(map);
-+
-+      np_base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(np_base))
-+              return PTR_ERR(np_base);
-+
-+      en7581_register_clocks(&pdev->dev, clk_data, map, np_base);
-+
-+      val = readl(np_base + REG_NP_SCU_SSTR);
-+      val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
-+      writel(val, np_base + REG_NP_SCU_SSTR);
-+      val = readl(np_base + REG_NP_SCU_PCIC);
-+      writel(val | 3, np_base + REG_NP_SCU_PCIC);
-+
-+      return en7581_reset_register(pdev);
-+}
-+
- static int en7523_clk_probe(struct platform_device *pdev)
- {
-       struct device_node *node = pdev->dev.of_node;
-@@ -728,19 +718,7 @@ static int en7523_clk_probe(struct platf
-       if (r)
-               return r;
--      r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
--      if (r)
--              return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n",
--                                   pdev->name);
--
--      r = en7523_reset_register(pdev, soc_data);
--      if (r) {
--              of_clk_del_provider(node);
--              return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n",
--                                   pdev->name);
--      }
--
--      return 0;
-+      return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- }
- static const struct en_clk_soc_data en7523_data = {
-@@ -758,11 +736,6 @@ static const struct en_clk_soc_data en75
-               .enable = en7581_pci_enable,
-               .disable = en7581_pci_disable,
-       },
--      .reset = {
--              .bank_ofs = en7581_rst_ofs,
--              .idx_map = en7581_rst_map,
--              .idx_map_nr = ARRAY_SIZE(en7581_rst_map),
--      },
-       .hw_init = en7581_clk_hw_init,
- };
diff --git a/target/linux/airoha/patches-6.6/033-06-v6.13-clk-en7523-map-io-region-in-a-single-block.patch b/target/linux/airoha/patches-6.6/033-06-v6.13-clk-en7523-map-io-region-in-a-single-block.patch
deleted file mode 100644 (file)
index dec7b81..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-From a9eaf305017a5ebe73ab34e85bd5414055a88f29 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 12 Nov 2024 01:08:54 +0100
-Subject: [PATCH 6/6] clk: en7523: map io region in a single block
-
-Map the whole clock-controller memory region in a single block.
-This patch does not introduce any backward incompatibility since the dts
-for EN7581 SoC is not upstream yet.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20241112-clk-en7581-syscon-v2-7-8ada5e394ae4@kernel.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 32 +++++++++++++-------------------
- 1 file changed, 13 insertions(+), 19 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -39,8 +39,8 @@
- #define REG_PCIE_XSI1_SEL_MASK                GENMASK(12, 11)
- #define REG_CRYPTO_CLKSRC2            0x20c
--#define REG_RST_CTRL2                 0x00
--#define REG_RST_CTRL1                 0x04
-+#define REG_RST_CTRL2                 0x830
-+#define REG_RST_CTRL1                 0x834
- struct en_clk_desc {
-       int id;
-@@ -645,15 +645,9 @@ static const struct reset_control_ops en
-       .status = en7523_reset_status,
- };
--static int en7581_reset_register(struct platform_device *pdev)
-+static int en7581_reset_register(struct device *dev, void __iomem *base)
- {
--      struct device *dev = &pdev->dev;
-       struct en_rst_data *rst_data;
--      void __iomem *base;
--
--      base = devm_platform_ioremap_resource(pdev, 1);
--      if (IS_ERR(base))
--              return PTR_ERR(base);
-       rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
-       if (!rst_data)
-@@ -677,27 +671,27 @@ static int en7581_reset_register(struct
- static int en7581_clk_hw_init(struct platform_device *pdev,
-                             struct clk_hw_onecell_data *clk_data)
- {
--      void __iomem *np_base;
-       struct regmap *map;
-+      void __iomem *base;
-       u32 val;
-       map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
-       if (IS_ERR(map))
-               return PTR_ERR(map);
--      np_base = devm_platform_ioremap_resource(pdev, 0);
--      if (IS_ERR(np_base))
--              return PTR_ERR(np_base);
-+      base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
--      en7581_register_clocks(&pdev->dev, clk_data, map, np_base);
-+      en7581_register_clocks(&pdev->dev, clk_data, map, base);
--      val = readl(np_base + REG_NP_SCU_SSTR);
-+      val = readl(base + REG_NP_SCU_SSTR);
-       val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
--      writel(val, np_base + REG_NP_SCU_SSTR);
--      val = readl(np_base + REG_NP_SCU_PCIC);
--      writel(val | 3, np_base + REG_NP_SCU_PCIC);
-+      writel(val, base + REG_NP_SCU_SSTR);
-+      val = readl(base + REG_NP_SCU_PCIC);
-+      writel(val | 3, base + REG_NP_SCU_PCIC);
--      return en7581_reset_register(pdev);
-+      return en7581_reset_register(&pdev->dev, base);
- }
- static int en7523_clk_probe(struct platform_device *pdev)
diff --git a/target/linux/airoha/patches-6.6/034-v6.13-pinctrl-airoha-Add-support-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/034-v6.13-pinctrl-airoha-Add-support-for-EN7581-SoC.patch
deleted file mode 100644 (file)
index a7d3888..0000000
+++ /dev/null
@@ -1,3060 +0,0 @@
-From 1c8ace2d0725c1c8d5012f8a56c5fb31805aad27 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 23 Oct 2024 01:20:05 +0200
-Subject: [PATCH] pinctrl: airoha: Add support for EN7581 SoC
-
-Introduce a pinctrl driver for the EN7581 SoC. The current EN7581 pinctrl
-driver supports the following functionality:
-- pin multiplexing
-- pin pull-up, pull-down, open-drain, current strength,
-  {input,output}_enable, output_{low,high}
-- gpio controller
-- irq controller
-
-Tested-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Co-developed-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Signed-off-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/20241023-en7581-pinctrl-v9-5-afb0cbcab0ec@kernel.org
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- MAINTAINERS                               |    7 +
- drivers/pinctrl/mediatek/Kconfig          |   17 +-
- drivers/pinctrl/mediatek/Makefile         |    1 +
- drivers/pinctrl/mediatek/pinctrl-airoha.c | 2970 +++++++++++++++++++++
- 4 files changed, 2994 insertions(+), 1 deletion(-)
- create mode 100644 drivers/pinctrl/mediatek/pinctrl-airoha.c
-
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -16872,6 +16872,13 @@ F:    drivers/pinctrl/
- F:    include/dt-bindings/pinctrl/
- F:    include/linux/pinctrl/
-+PIN CONTROLLER - AIROHA
-+M:    Lorenzo Bianconi <lorenzo@kernel.org>
-+L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
-+S:    Maintained
-+F:    Documentation/devicetree/bindings/pinctrl/airoha,en7581-pinctrl.yaml
-+F:    drivers/pinctrl/mediatek/pinctrl-airoha.c
-+
- PIN CONTROLLER - AMD
- M:    Basavaraj Natikar <Basavaraj.Natikar@amd.com>
- M:    Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
---- a/drivers/pinctrl/mediatek/Kconfig
-+++ b/drivers/pinctrl/mediatek/Kconfig
-@@ -1,6 +1,6 @@
- # SPDX-License-Identifier: GPL-2.0-only
- menu "MediaTek pinctrl drivers"
--      depends on ARCH_MEDIATEK || RALINK || COMPILE_TEST
-+      depends on ARCH_MEDIATEK || ARCH_AIROHA || RALINK || COMPILE_TEST
- config EINT_MTK
-       tristate "MediaTek External Interrupt Support"
-@@ -126,6 +126,21 @@ config PINCTRL_MT8127
-       select PINCTRL_MTK
- # For ARMv8 SoCs
-+config PINCTRL_AIROHA
-+      tristate "Airoha EN7581 pin control"
-+      depends on OF
-+      depends on ARM64 || COMPILE_TEST
-+      select PINMUX
-+      select GENERIC_PINCONF
-+      select GENERIC_PINCTRL_GROUPS
-+      select GENERIC_PINMUX_FUNCTIONS
-+      select GPIOLIB
-+      select GPIOLIB_IRQCHIP
-+      select REGMAP_MMIO
-+      help
-+        Say yes here to support pin controller and gpio driver
-+        on Airoha EN7581 SoC.
-+
- config PINCTRL_MT2712
-       bool "MediaTek MT2712 pin control"
-       depends on OF
---- a/drivers/pinctrl/mediatek/Makefile
-+++ b/drivers/pinctrl/mediatek/Makefile
-@@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_MTK_MOORE)                += pinc
- obj-$(CONFIG_PINCTRL_MTK_PARIS)               += pinctrl-paris.o
- # SoC Drivers
-+obj-$(CONFIG_PINCTRL_AIROHA)          += pinctrl-airoha.o
- obj-$(CONFIG_PINCTRL_MT7620)          += pinctrl-mt7620.o
- obj-$(CONFIG_PINCTRL_MT7621)          += pinctrl-mt7621.o
- obj-$(CONFIG_PINCTRL_MT76X8)          += pinctrl-mt76x8.o
---- /dev/null
-+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
-@@ -0,0 +1,2970 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ * Author: Benjamin Larsson <benjamin.larsson@genexis.eu>
-+ * Author: Markus Gothe <markus.gothe@genexis.eu>
-+ */
-+
-+#include <dt-bindings/pinctrl/mt65xx.h>
-+#include <linux/bits.h>
-+#include <linux/cleanup.h>
-+#include <linux/gpio/driver.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/irq.h>
-+#include <linux/irqdomain.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/of.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_platform.h>
-+#include <linux/pinctrl/consumer.h>
-+#include <linux/pinctrl/pinctrl.h>
-+#include <linux/pinctrl/pinconf.h>
-+#include <linux/pinctrl/pinconf-generic.h>
-+#include <linux/pinctrl/pinmux.h>
-+#include <linux/platform_device.h>
-+#include <linux/regmap.h>
-+
-+#include "../core.h"
-+#include "../pinconf.h"
-+#include "../pinmux.h"
-+
-+#define PINCTRL_PIN_GROUP(id)                                         \
-+      PINCTRL_PINGROUP(#id, id##_pins, ARRAY_SIZE(id##_pins))
-+
-+#define PINCTRL_FUNC_DESC(id)                                         \
-+      {                                                               \
-+              .desc = { #id, id##_groups, ARRAY_SIZE(id##_groups) },  \
-+              .groups = id##_func_group,                              \
-+              .group_size = ARRAY_SIZE(id##_func_group),              \
-+      }
-+
-+#define PINCTRL_CONF_DESC(p, offset, mask)                            \
-+      {                                                               \
-+              .pin = p,                                               \
-+              .reg = { offset, mask },                                \
-+      }
-+
-+/* MUX */
-+#define REG_GPIO_2ND_I2C_MODE                 0x0214
-+#define GPIO_MDC_IO_MASTER_MODE_MODE          BIT(14)
-+#define GPIO_I2C_MASTER_MODE_MODE             BIT(13)
-+#define GPIO_I2S_MODE_MASK                    BIT(12)
-+#define GPIO_I2C_SLAVE_MODE_MODE              BIT(11)
-+#define GPIO_LAN3_LED1_MODE_MASK              BIT(10)
-+#define GPIO_LAN3_LED0_MODE_MASK              BIT(9)
-+#define GPIO_LAN2_LED1_MODE_MASK              BIT(8)
-+#define GPIO_LAN2_LED0_MODE_MASK              BIT(7)
-+#define GPIO_LAN1_LED1_MODE_MASK              BIT(6)
-+#define GPIO_LAN1_LED0_MODE_MASK              BIT(5)
-+#define GPIO_LAN0_LED1_MODE_MASK              BIT(4)
-+#define GPIO_LAN0_LED0_MODE_MASK              BIT(3)
-+#define PON_TOD_1PPS_MODE_MASK                        BIT(2)
-+#define GSW_TOD_1PPS_MODE_MASK                        BIT(1)
-+#define GPIO_2ND_I2C_MODE_MASK                        BIT(0)
-+
-+#define REG_GPIO_SPI_CS1_MODE                 0x0218
-+#define GPIO_PCM_SPI_CS4_MODE_MASK            BIT(21)
-+#define GPIO_PCM_SPI_CS3_MODE_MASK            BIT(20)
-+#define GPIO_PCM_SPI_CS2_MODE_P156_MASK               BIT(19)
-+#define GPIO_PCM_SPI_CS2_MODE_P128_MASK               BIT(18)
-+#define GPIO_PCM_SPI_CS1_MODE_MASK            BIT(17)
-+#define GPIO_PCM_SPI_MODE_MASK                        BIT(16)
-+#define GPIO_PCM2_MODE_MASK                   BIT(13)
-+#define GPIO_PCM1_MODE_MASK                   BIT(12)
-+#define GPIO_PCM_INT_MODE_MASK                        BIT(9)
-+#define GPIO_PCM_RESET_MODE_MASK              BIT(8)
-+#define GPIO_SPI_QUAD_MODE_MASK                       BIT(4)
-+#define GPIO_SPI_CS4_MODE_MASK                        BIT(3)
-+#define GPIO_SPI_CS3_MODE_MASK                        BIT(2)
-+#define GPIO_SPI_CS2_MODE_MASK                        BIT(1)
-+#define GPIO_SPI_CS1_MODE_MASK                        BIT(0)
-+
-+#define REG_GPIO_PON_MODE                     0x021c
-+#define GPIO_PARALLEL_NAND_MODE_MASK          BIT(14)
-+#define GPIO_SGMII_MDIO_MODE_MASK             BIT(13)
-+#define GPIO_PCIE_RESET2_MASK                 BIT(12)
-+#define SIPO_RCLK_MODE_MASK                   BIT(11)
-+#define GPIO_PCIE_RESET1_MASK                 BIT(10)
-+#define GPIO_PCIE_RESET0_MASK                 BIT(9)
-+#define GPIO_UART5_MODE_MASK                  BIT(8)
-+#define GPIO_UART4_MODE_MASK                  BIT(7)
-+#define GPIO_HSUART_CTS_RTS_MODE_MASK         BIT(6)
-+#define GPIO_HSUART_MODE_MASK                 BIT(5)
-+#define GPIO_UART2_CTS_RTS_MODE_MASK          BIT(4)
-+#define GPIO_UART2_MODE_MASK                  BIT(3)
-+#define GPIO_SIPO_MODE_MASK                   BIT(2)
-+#define GPIO_EMMC_MODE_MASK                   BIT(1)
-+#define GPIO_PON_MODE_MASK                    BIT(0)
-+
-+#define REG_NPU_UART_EN                               0x0224
-+#define JTAG_UDI_EN_MASK                      BIT(4)
-+#define JTAG_DFD_EN_MASK                      BIT(3)
-+
-+/* LED MAP */
-+#define REG_LAN_LED0_MAPPING                  0x027c
-+#define REG_LAN_LED1_MAPPING                  0x0280
-+
-+#define LAN4_LED_MAPPING_MASK                 GENMASK(18, 16)
-+#define LAN4_PHY4_LED_MAP                     BIT(18)
-+#define LAN4_PHY2_LED_MAP                     BIT(17)
-+#define LAN4_PHY1_LED_MAP                     BIT(16)
-+#define LAN4_PHY0_LED_MAP                     0
-+#define LAN4_PHY3_LED_MAP                     GENMASK(17, 16)
-+
-+#define LAN3_LED_MAPPING_MASK                 GENMASK(14, 12)
-+#define LAN3_PHY4_LED_MAP                     BIT(14)
-+#define LAN3_PHY2_LED_MAP                     BIT(13)
-+#define LAN3_PHY1_LED_MAP                     BIT(12)
-+#define LAN3_PHY0_LED_MAP                     0
-+#define LAN3_PHY3_LED_MAP                     GENMASK(13, 12)
-+
-+#define LAN2_LED_MAPPING_MASK                 GENMASK(10, 8)
-+#define LAN2_PHY4_LED_MAP                     BIT(12)
-+#define LAN2_PHY2_LED_MAP                     BIT(11)
-+#define LAN2_PHY1_LED_MAP                     BIT(10)
-+#define LAN2_PHY0_LED_MAP                     0
-+#define LAN2_PHY3_LED_MAP                     GENMASK(11, 10)
-+
-+#define LAN1_LED_MAPPING_MASK                 GENMASK(6, 4)
-+#define LAN1_PHY4_LED_MAP                     BIT(6)
-+#define LAN1_PHY2_LED_MAP                     BIT(5)
-+#define LAN1_PHY1_LED_MAP                     BIT(4)
-+#define LAN1_PHY0_LED_MAP                     0
-+#define LAN1_PHY3_LED_MAP                     GENMASK(5, 4)
-+
-+#define LAN0_LED_MAPPING_MASK                 GENMASK(2, 0)
-+#define LAN0_PHY4_LED_MAP                     BIT(3)
-+#define LAN0_PHY2_LED_MAP                     BIT(2)
-+#define LAN0_PHY1_LED_MAP                     BIT(1)
-+#define LAN0_PHY0_LED_MAP                     0
-+#define LAN0_PHY3_LED_MAP                     GENMASK(2, 1)
-+
-+/* CONF */
-+#define REG_I2C_SDA_E2                                0x001c
-+#define SPI_MISO_E2_MASK                      BIT(14)
-+#define SPI_MOSI_E2_MASK                      BIT(13)
-+#define SPI_CLK_E2_MASK                               BIT(12)
-+#define SPI_CS0_E2_MASK                               BIT(11)
-+#define PCIE2_RESET_E2_MASK                   BIT(10)
-+#define PCIE1_RESET_E2_MASK                   BIT(9)
-+#define PCIE0_RESET_E2_MASK                   BIT(8)
-+#define UART1_RXD_E2_MASK                     BIT(3)
-+#define UART1_TXD_E2_MASK                     BIT(2)
-+#define I2C_SCL_E2_MASK                               BIT(1)
-+#define I2C_SDA_E2_MASK                               BIT(0)
-+
-+#define REG_I2C_SDA_E4                                0x0020
-+#define SPI_MISO_E4_MASK                      BIT(14)
-+#define SPI_MOSI_E4_MASK                      BIT(13)
-+#define SPI_CLK_E4_MASK                               BIT(12)
-+#define SPI_CS0_E4_MASK                               BIT(11)
-+#define PCIE2_RESET_E4_MASK                   BIT(10)
-+#define PCIE1_RESET_E4_MASK                   BIT(9)
-+#define PCIE0_RESET_E4_MASK                   BIT(8)
-+#define UART1_RXD_E4_MASK                     BIT(3)
-+#define UART1_TXD_E4_MASK                     BIT(2)
-+#define I2C_SCL_E4_MASK                               BIT(1)
-+#define I2C_SDA_E4_MASK                               BIT(0)
-+
-+#define REG_GPIO_L_E2                         0x0024
-+#define REG_GPIO_L_E4                         0x0028
-+#define REG_GPIO_H_E2                         0x002c
-+#define REG_GPIO_H_E4                         0x0030
-+
-+#define REG_I2C_SDA_PU                                0x0044
-+#define SPI_MISO_PU_MASK                      BIT(14)
-+#define SPI_MOSI_PU_MASK                      BIT(13)
-+#define SPI_CLK_PU_MASK                               BIT(12)
-+#define SPI_CS0_PU_MASK                               BIT(11)
-+#define PCIE2_RESET_PU_MASK                   BIT(10)
-+#define PCIE1_RESET_PU_MASK                   BIT(9)
-+#define PCIE0_RESET_PU_MASK                   BIT(8)
-+#define UART1_RXD_PU_MASK                     BIT(3)
-+#define UART1_TXD_PU_MASK                     BIT(2)
-+#define I2C_SCL_PU_MASK                               BIT(1)
-+#define I2C_SDA_PU_MASK                               BIT(0)
-+
-+#define REG_I2C_SDA_PD                                0x0048
-+#define SPI_MISO_PD_MASK                      BIT(14)
-+#define SPI_MOSI_PD_MASK                      BIT(13)
-+#define SPI_CLK_PD_MASK                               BIT(12)
-+#define SPI_CS0_PD_MASK                               BIT(11)
-+#define PCIE2_RESET_PD_MASK                   BIT(10)
-+#define PCIE1_RESET_PD_MASK                   BIT(9)
-+#define PCIE0_RESET_PD_MASK                   BIT(8)
-+#define UART1_RXD_PD_MASK                     BIT(3)
-+#define UART1_TXD_PD_MASK                     BIT(2)
-+#define I2C_SCL_PD_MASK                               BIT(1)
-+#define I2C_SDA_PD_MASK                               BIT(0)
-+
-+#define REG_GPIO_L_PU                         0x004c
-+#define REG_GPIO_L_PD                         0x0050
-+#define REG_GPIO_H_PU                         0x0054
-+#define REG_GPIO_H_PD                         0x0058
-+
-+#define REG_PCIE_RESET_OD                     0x018c
-+#define PCIE2_RESET_OD_MASK                   BIT(2)
-+#define PCIE1_RESET_OD_MASK                   BIT(1)
-+#define PCIE0_RESET_OD_MASK                   BIT(0)
-+
-+/* GPIOs */
-+#define REG_GPIO_CTRL                         0x0000
-+#define REG_GPIO_DATA                         0x0004
-+#define REG_GPIO_INT                          0x0008
-+#define REG_GPIO_INT_EDGE                     0x000c
-+#define REG_GPIO_INT_LEVEL                    0x0010
-+#define REG_GPIO_OE                           0x0014
-+#define REG_GPIO_CTRL1                                0x0020
-+
-+/* PWM MODE CONF */
-+#define REG_GPIO_FLASH_MODE_CFG                       0x0034
-+#define GPIO15_FLASH_MODE_CFG                 BIT(15)
-+#define GPIO14_FLASH_MODE_CFG                 BIT(14)
-+#define GPIO13_FLASH_MODE_CFG                 BIT(13)
-+#define GPIO12_FLASH_MODE_CFG                 BIT(12)
-+#define GPIO11_FLASH_MODE_CFG                 BIT(11)
-+#define GPIO10_FLASH_MODE_CFG                 BIT(10)
-+#define GPIO9_FLASH_MODE_CFG                  BIT(9)
-+#define GPIO8_FLASH_MODE_CFG                  BIT(8)
-+#define GPIO7_FLASH_MODE_CFG                  BIT(7)
-+#define GPIO6_FLASH_MODE_CFG                  BIT(6)
-+#define GPIO5_FLASH_MODE_CFG                  BIT(5)
-+#define GPIO4_FLASH_MODE_CFG                  BIT(4)
-+#define GPIO3_FLASH_MODE_CFG                  BIT(3)
-+#define GPIO2_FLASH_MODE_CFG                  BIT(2)
-+#define GPIO1_FLASH_MODE_CFG                  BIT(1)
-+#define GPIO0_FLASH_MODE_CFG                  BIT(0)
-+
-+#define REG_GPIO_CTRL2                                0x0060
-+#define REG_GPIO_CTRL3                                0x0064
-+
-+/* PWM MODE CONF EXT */
-+#define REG_GPIO_FLASH_MODE_CFG_EXT           0x0068
-+#define GPIO51_FLASH_MODE_CFG                 BIT(31)
-+#define GPIO50_FLASH_MODE_CFG                 BIT(30)
-+#define GPIO49_FLASH_MODE_CFG                 BIT(29)
-+#define GPIO48_FLASH_MODE_CFG                 BIT(28)
-+#define GPIO47_FLASH_MODE_CFG                 BIT(27)
-+#define GPIO46_FLASH_MODE_CFG                 BIT(26)
-+#define GPIO45_FLASH_MODE_CFG                 BIT(25)
-+#define GPIO44_FLASH_MODE_CFG                 BIT(24)
-+#define GPIO43_FLASH_MODE_CFG                 BIT(23)
-+#define GPIO42_FLASH_MODE_CFG                 BIT(22)
-+#define GPIO41_FLASH_MODE_CFG                 BIT(21)
-+#define GPIO40_FLASH_MODE_CFG                 BIT(20)
-+#define GPIO39_FLASH_MODE_CFG                 BIT(19)
-+#define GPIO38_FLASH_MODE_CFG                 BIT(18)
-+#define GPIO37_FLASH_MODE_CFG                 BIT(17)
-+#define GPIO36_FLASH_MODE_CFG                 BIT(16)
-+#define GPIO31_FLASH_MODE_CFG                 BIT(15)
-+#define GPIO30_FLASH_MODE_CFG                 BIT(14)
-+#define GPIO29_FLASH_MODE_CFG                 BIT(13)
-+#define GPIO28_FLASH_MODE_CFG                 BIT(12)
-+#define GPIO27_FLASH_MODE_CFG                 BIT(11)
-+#define GPIO26_FLASH_MODE_CFG                 BIT(10)
-+#define GPIO25_FLASH_MODE_CFG                 BIT(9)
-+#define GPIO24_FLASH_MODE_CFG                 BIT(8)
-+#define GPIO23_FLASH_MODE_CFG                 BIT(7)
-+#define GPIO22_FLASH_MODE_CFG                 BIT(6)
-+#define GPIO21_FLASH_MODE_CFG                 BIT(5)
-+#define GPIO20_FLASH_MODE_CFG                 BIT(4)
-+#define GPIO19_FLASH_MODE_CFG                 BIT(3)
-+#define GPIO18_FLASH_MODE_CFG                 BIT(2)
-+#define GPIO17_FLASH_MODE_CFG                 BIT(1)
-+#define GPIO16_FLASH_MODE_CFG                 BIT(0)
-+
-+#define REG_GPIO_DATA1                                0x0070
-+#define REG_GPIO_OE1                          0x0078
-+#define REG_GPIO_INT1                         0x007c
-+#define REG_GPIO_INT_EDGE1                    0x0080
-+#define REG_GPIO_INT_EDGE2                    0x0084
-+#define REG_GPIO_INT_EDGE3                    0x0088
-+#define REG_GPIO_INT_LEVEL1                   0x008c
-+#define REG_GPIO_INT_LEVEL2                   0x0090
-+#define REG_GPIO_INT_LEVEL3                   0x0094
-+
-+#define AIROHA_NUM_PINS                               64
-+#define AIROHA_PIN_BANK_SIZE                  (AIROHA_NUM_PINS / 2)
-+#define AIROHA_REG_GPIOCTRL_NUM_PIN           (AIROHA_NUM_PINS / 4)
-+
-+static const u32 gpio_data_regs[] = {
-+      REG_GPIO_DATA,
-+      REG_GPIO_DATA1
-+};
-+
-+static const u32 gpio_out_regs[] = {
-+      REG_GPIO_OE,
-+      REG_GPIO_OE1
-+};
-+
-+static const u32 gpio_dir_regs[] = {
-+      REG_GPIO_CTRL,
-+      REG_GPIO_CTRL1,
-+      REG_GPIO_CTRL2,
-+      REG_GPIO_CTRL3
-+};
-+
-+static const u32 irq_status_regs[] = {
-+      REG_GPIO_INT,
-+      REG_GPIO_INT1
-+};
-+
-+static const u32 irq_level_regs[] = {
-+      REG_GPIO_INT_LEVEL,
-+      REG_GPIO_INT_LEVEL1,
-+      REG_GPIO_INT_LEVEL2,
-+      REG_GPIO_INT_LEVEL3
-+};
-+
-+static const u32 irq_edge_regs[] = {
-+      REG_GPIO_INT_EDGE,
-+      REG_GPIO_INT_EDGE1,
-+      REG_GPIO_INT_EDGE2,
-+      REG_GPIO_INT_EDGE3
-+};
-+
-+struct airoha_pinctrl_reg {
-+      u32 offset;
-+      u32 mask;
-+};
-+
-+enum airoha_pinctrl_mux_func {
-+      AIROHA_FUNC_MUX,
-+      AIROHA_FUNC_PWM_MUX,
-+      AIROHA_FUNC_PWM_EXT_MUX,
-+};
-+
-+struct airoha_pinctrl_func_group {
-+      const char *name;
-+      struct {
-+              enum airoha_pinctrl_mux_func mux;
-+              u32 offset;
-+              u32 mask;
-+              u32 val;
-+      } regmap[2];
-+      int regmap_size;
-+};
-+
-+struct airoha_pinctrl_func {
-+      const struct function_desc desc;
-+      const struct airoha_pinctrl_func_group *groups;
-+      u8 group_size;
-+};
-+
-+struct airoha_pinctrl_conf {
-+      u32 pin;
-+      struct airoha_pinctrl_reg reg;
-+};
-+
-+struct airoha_pinctrl_gpiochip {
-+      struct gpio_chip chip;
-+
-+      /* gpio */
-+      const u32 *data;
-+      const u32 *dir;
-+      const u32 *out;
-+      /* irq */
-+      const u32 *status;
-+      const u32 *level;
-+      const u32 *edge;
-+
-+      u32 irq_type[AIROHA_NUM_PINS];
-+};
-+
-+struct airoha_pinctrl {
-+      struct pinctrl_dev *ctrl;
-+
-+      struct regmap *chip_scu;
-+      struct regmap *regmap;
-+
-+      struct airoha_pinctrl_gpiochip gpiochip;
-+};
-+
-+static struct pinctrl_pin_desc airoha_pinctrl_pins[] = {
-+      PINCTRL_PIN(0, "uart1_txd"),
-+      PINCTRL_PIN(1, "uart1_rxd"),
-+      PINCTRL_PIN(2, "i2c_scl"),
-+      PINCTRL_PIN(3, "i2c_sda"),
-+      PINCTRL_PIN(4, "spi_cs0"),
-+      PINCTRL_PIN(5, "spi_clk"),
-+      PINCTRL_PIN(6, "spi_mosi"),
-+      PINCTRL_PIN(7, "spi_miso"),
-+      PINCTRL_PIN(13, "gpio0"),
-+      PINCTRL_PIN(14, "gpio1"),
-+      PINCTRL_PIN(15, "gpio2"),
-+      PINCTRL_PIN(16, "gpio3"),
-+      PINCTRL_PIN(17, "gpio4"),
-+      PINCTRL_PIN(18, "gpio5"),
-+      PINCTRL_PIN(19, "gpio6"),
-+      PINCTRL_PIN(20, "gpio7"),
-+      PINCTRL_PIN(21, "gpio8"),
-+      PINCTRL_PIN(22, "gpio9"),
-+      PINCTRL_PIN(23, "gpio10"),
-+      PINCTRL_PIN(24, "gpio11"),
-+      PINCTRL_PIN(25, "gpio12"),
-+      PINCTRL_PIN(26, "gpio13"),
-+      PINCTRL_PIN(27, "gpio14"),
-+      PINCTRL_PIN(28, "gpio15"),
-+      PINCTRL_PIN(29, "gpio16"),
-+      PINCTRL_PIN(30, "gpio17"),
-+      PINCTRL_PIN(31, "gpio18"),
-+      PINCTRL_PIN(32, "gpio19"),
-+      PINCTRL_PIN(33, "gpio20"),
-+      PINCTRL_PIN(34, "gpio21"),
-+      PINCTRL_PIN(35, "gpio22"),
-+      PINCTRL_PIN(36, "gpio23"),
-+      PINCTRL_PIN(37, "gpio24"),
-+      PINCTRL_PIN(38, "gpio25"),
-+      PINCTRL_PIN(39, "gpio26"),
-+      PINCTRL_PIN(40, "gpio27"),
-+      PINCTRL_PIN(41, "gpio28"),
-+      PINCTRL_PIN(42, "gpio29"),
-+      PINCTRL_PIN(43, "gpio30"),
-+      PINCTRL_PIN(44, "gpio31"),
-+      PINCTRL_PIN(45, "gpio32"),
-+      PINCTRL_PIN(46, "gpio33"),
-+      PINCTRL_PIN(47, "gpio34"),
-+      PINCTRL_PIN(48, "gpio35"),
-+      PINCTRL_PIN(49, "gpio36"),
-+      PINCTRL_PIN(50, "gpio37"),
-+      PINCTRL_PIN(51, "gpio38"),
-+      PINCTRL_PIN(52, "gpio39"),
-+      PINCTRL_PIN(53, "gpio40"),
-+      PINCTRL_PIN(54, "gpio41"),
-+      PINCTRL_PIN(55, "gpio42"),
-+      PINCTRL_PIN(56, "gpio43"),
-+      PINCTRL_PIN(57, "gpio44"),
-+      PINCTRL_PIN(58, "gpio45"),
-+      PINCTRL_PIN(59, "gpio46"),
-+      PINCTRL_PIN(61, "pcie_reset0"),
-+      PINCTRL_PIN(62, "pcie_reset1"),
-+      PINCTRL_PIN(63, "pcie_reset2"),
-+};
-+
-+static const int pon_pins[] = { 49, 50, 51, 52, 53, 54 };
-+static const int pon_tod_1pps_pins[] = { 46 };
-+static const int gsw_tod_1pps_pins[] = { 46 };
-+static const int sipo_pins[] = { 16, 17 };
-+static const int sipo_rclk_pins[] = { 16, 17, 43 };
-+static const int mdio_pins[] = { 14, 15 };
-+static const int uart2_pins[] = { 48, 55 };
-+static const int uart2_cts_rts_pins[] = { 46, 47 };
-+static const int hsuart_pins[] = { 28, 29 };
-+static const int hsuart_cts_rts_pins[] = { 26, 27 };
-+static const int uart4_pins[] = { 38, 39 };
-+static const int uart5_pins[] = { 18, 19 };
-+static const int i2c0_pins[] = { 2, 3 };
-+static const int i2c1_pins[] = { 14, 15 };
-+static const int jtag_udi_pins[] = { 16, 17, 18, 19, 20 };
-+static const int jtag_dfd_pins[] = { 16, 17, 18, 19, 20 };
-+static const int i2s_pins[] = { 26, 27, 28, 29 };
-+static const int pcm1_pins[] = { 22, 23, 24, 25 };
-+static const int pcm2_pins[] = { 18, 19, 20, 21 };
-+static const int spi_quad_pins[] = { 32, 33 };
-+static const int spi_pins[] = { 4, 5, 6, 7 };
-+static const int spi_cs1_pins[] = { 34 };
-+static const int pcm_spi_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25 };
-+static const int pcm_spi_int_pins[] = { 14 };
-+static const int pcm_spi_rst_pins[] = { 15 };
-+static const int pcm_spi_cs1_pins[] = { 43 };
-+static const int pcm_spi_cs2_pins[] = { 40 };
-+static const int pcm_spi_cs2_p128_pins[] = { 40 };
-+static const int pcm_spi_cs2_p156_pins[] = { 40 };
-+static const int pcm_spi_cs3_pins[] = { 41 };
-+static const int pcm_spi_cs4_pins[] = { 42 };
-+static const int emmc_pins[] = { 4, 5, 6, 30, 31, 32, 33, 34, 35, 36, 37 };
-+static const int pnand_pins[] = { 4, 5, 6, 7, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42 };
-+static const int gpio0_pins[] = { 13 };
-+static const int gpio1_pins[] = { 14 };
-+static const int gpio2_pins[] = { 15 };
-+static const int gpio3_pins[] = { 16 };
-+static const int gpio4_pins[] = { 17 };
-+static const int gpio5_pins[] = { 18 };
-+static const int gpio6_pins[] = { 19 };
-+static const int gpio7_pins[] = { 20 };
-+static const int gpio8_pins[] = { 21 };
-+static const int gpio9_pins[] = { 22 };
-+static const int gpio10_pins[] = { 23 };
-+static const int gpio11_pins[] = { 24 };
-+static const int gpio12_pins[] = { 25 };
-+static const int gpio13_pins[] = { 26 };
-+static const int gpio14_pins[] = { 27 };
-+static const int gpio15_pins[] = { 28 };
-+static const int gpio16_pins[] = { 29 };
-+static const int gpio17_pins[] = { 30 };
-+static const int gpio18_pins[] = { 31 };
-+static const int gpio19_pins[] = { 32 };
-+static const int gpio20_pins[] = { 33 };
-+static const int gpio21_pins[] = { 34 };
-+static const int gpio22_pins[] = { 35 };
-+static const int gpio23_pins[] = { 36 };
-+static const int gpio24_pins[] = { 37 };
-+static const int gpio25_pins[] = { 38 };
-+static const int gpio26_pins[] = { 39 };
-+static const int gpio27_pins[] = { 40 };
-+static const int gpio28_pins[] = { 41 };
-+static const int gpio29_pins[] = { 42 };
-+static const int gpio30_pins[] = { 43 };
-+static const int gpio31_pins[] = { 44 };
-+static const int gpio33_pins[] = { 46 };
-+static const int gpio34_pins[] = { 47 };
-+static const int gpio35_pins[] = { 48 };
-+static const int gpio36_pins[] = { 49 };
-+static const int gpio37_pins[] = { 50 };
-+static const int gpio38_pins[] = { 51 };
-+static const int gpio39_pins[] = { 52 };
-+static const int gpio40_pins[] = { 53 };
-+static const int gpio41_pins[] = { 54 };
-+static const int gpio42_pins[] = { 55 };
-+static const int gpio43_pins[] = { 56 };
-+static const int gpio44_pins[] = { 57 };
-+static const int gpio45_pins[] = { 58 };
-+static const int gpio46_pins[] = { 59 };
-+static const int pcie_reset0_pins[] = { 61 };
-+static const int pcie_reset1_pins[] = { 62 };
-+static const int pcie_reset2_pins[] = { 63 };
-+
-+static const struct pingroup airoha_pinctrl_groups[] = {
-+      PINCTRL_PIN_GROUP(pon),
-+      PINCTRL_PIN_GROUP(pon_tod_1pps),
-+      PINCTRL_PIN_GROUP(gsw_tod_1pps),
-+      PINCTRL_PIN_GROUP(sipo),
-+      PINCTRL_PIN_GROUP(sipo_rclk),
-+      PINCTRL_PIN_GROUP(mdio),
-+      PINCTRL_PIN_GROUP(uart2),
-+      PINCTRL_PIN_GROUP(uart2_cts_rts),
-+      PINCTRL_PIN_GROUP(hsuart),
-+      PINCTRL_PIN_GROUP(hsuart_cts_rts),
-+      PINCTRL_PIN_GROUP(uart4),
-+      PINCTRL_PIN_GROUP(uart5),
-+      PINCTRL_PIN_GROUP(i2c0),
-+      PINCTRL_PIN_GROUP(i2c1),
-+      PINCTRL_PIN_GROUP(jtag_udi),
-+      PINCTRL_PIN_GROUP(jtag_dfd),
-+      PINCTRL_PIN_GROUP(i2s),
-+      PINCTRL_PIN_GROUP(pcm1),
-+      PINCTRL_PIN_GROUP(pcm2),
-+      PINCTRL_PIN_GROUP(spi),
-+      PINCTRL_PIN_GROUP(spi_quad),
-+      PINCTRL_PIN_GROUP(spi_cs1),
-+      PINCTRL_PIN_GROUP(pcm_spi),
-+      PINCTRL_PIN_GROUP(pcm_spi_int),
-+      PINCTRL_PIN_GROUP(pcm_spi_rst),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs1),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs2_p128),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs2_p156),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs2),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs3),
-+      PINCTRL_PIN_GROUP(pcm_spi_cs4),
-+      PINCTRL_PIN_GROUP(emmc),
-+      PINCTRL_PIN_GROUP(pnand),
-+      PINCTRL_PIN_GROUP(gpio0),
-+      PINCTRL_PIN_GROUP(gpio1),
-+      PINCTRL_PIN_GROUP(gpio2),
-+      PINCTRL_PIN_GROUP(gpio3),
-+      PINCTRL_PIN_GROUP(gpio4),
-+      PINCTRL_PIN_GROUP(gpio5),
-+      PINCTRL_PIN_GROUP(gpio6),
-+      PINCTRL_PIN_GROUP(gpio7),
-+      PINCTRL_PIN_GROUP(gpio8),
-+      PINCTRL_PIN_GROUP(gpio9),
-+      PINCTRL_PIN_GROUP(gpio10),
-+      PINCTRL_PIN_GROUP(gpio11),
-+      PINCTRL_PIN_GROUP(gpio12),
-+      PINCTRL_PIN_GROUP(gpio13),
-+      PINCTRL_PIN_GROUP(gpio14),
-+      PINCTRL_PIN_GROUP(gpio15),
-+      PINCTRL_PIN_GROUP(gpio16),
-+      PINCTRL_PIN_GROUP(gpio17),
-+      PINCTRL_PIN_GROUP(gpio18),
-+      PINCTRL_PIN_GROUP(gpio19),
-+      PINCTRL_PIN_GROUP(gpio20),
-+      PINCTRL_PIN_GROUP(gpio21),
-+      PINCTRL_PIN_GROUP(gpio22),
-+      PINCTRL_PIN_GROUP(gpio23),
-+      PINCTRL_PIN_GROUP(gpio24),
-+      PINCTRL_PIN_GROUP(gpio25),
-+      PINCTRL_PIN_GROUP(gpio26),
-+      PINCTRL_PIN_GROUP(gpio27),
-+      PINCTRL_PIN_GROUP(gpio28),
-+      PINCTRL_PIN_GROUP(gpio29),
-+      PINCTRL_PIN_GROUP(gpio30),
-+      PINCTRL_PIN_GROUP(gpio31),
-+      PINCTRL_PIN_GROUP(gpio33),
-+      PINCTRL_PIN_GROUP(gpio34),
-+      PINCTRL_PIN_GROUP(gpio35),
-+      PINCTRL_PIN_GROUP(gpio36),
-+      PINCTRL_PIN_GROUP(gpio37),
-+      PINCTRL_PIN_GROUP(gpio38),
-+      PINCTRL_PIN_GROUP(gpio39),
-+      PINCTRL_PIN_GROUP(gpio40),
-+      PINCTRL_PIN_GROUP(gpio41),
-+      PINCTRL_PIN_GROUP(gpio42),
-+      PINCTRL_PIN_GROUP(gpio43),
-+      PINCTRL_PIN_GROUP(gpio44),
-+      PINCTRL_PIN_GROUP(gpio45),
-+      PINCTRL_PIN_GROUP(gpio46),
-+      PINCTRL_PIN_GROUP(pcie_reset0),
-+      PINCTRL_PIN_GROUP(pcie_reset1),
-+      PINCTRL_PIN_GROUP(pcie_reset2),
-+};
-+
-+static const char *const pon_groups[] = { "pon" };
-+static const char *const tod_1pps_groups[] = { "pon_tod_1pps", "gsw_tod_1pps" };
-+static const char *const sipo_groups[] = { "sipo", "sipo_rclk" };
-+static const char *const mdio_groups[] = { "mdio" };
-+static const char *const uart_groups[] = { "uart2", "uart2_cts_rts", "hsuart",
-+                                         "hsuart_cts_rts", "uart4",
-+                                         "uart5" };
-+static const char *const i2c_groups[] = { "i2c1" };
-+static const char *const jtag_groups[] = { "jtag_udi", "jtag_dfd" };
-+static const char *const pcm_groups[] = { "pcm1", "pcm2" };
-+static const char *const spi_groups[] = { "spi_quad", "spi_cs1" };
-+static const char *const pcm_spi_groups[] = { "pcm_spi", "pcm_spi_int",
-+                                            "pcm_spi_rst", "pcm_spi_cs1",
-+                                            "pcm_spi_cs2_p156",
-+                                            "pcm_spi_cs2_p128",
-+                                            "pcm_spi_cs3", "pcm_spi_cs4" };
-+static const char *const i2s_groups[] = { "i2s" };
-+static const char *const emmc_groups[] = { "emmc" };
-+static const char *const pnand_groups[] = { "pnand" };
-+static const char *const pcie_reset_groups[] = { "pcie_reset0", "pcie_reset1",
-+                                               "pcie_reset2" };
-+static const char *const pwm_groups[] = { "gpio0", "gpio1",
-+                                        "gpio2", "gpio3",
-+                                        "gpio4", "gpio5",
-+                                        "gpio6", "gpio7",
-+                                        "gpio8", "gpio9",
-+                                        "gpio10", "gpio11",
-+                                        "gpio12", "gpio13",
-+                                        "gpio14", "gpio15",
-+                                        "gpio16", "gpio17",
-+                                        "gpio18", "gpio19",
-+                                        "gpio20", "gpio21",
-+                                        "gpio22", "gpio23",
-+                                        "gpio24", "gpio25",
-+                                        "gpio26", "gpio27",
-+                                        "gpio28", "gpio29",
-+                                        "gpio30", "gpio31",
-+                                        "gpio36", "gpio37",
-+                                        "gpio38", "gpio39",
-+                                        "gpio40", "gpio41",
-+                                        "gpio42", "gpio43",
-+                                        "gpio44", "gpio45",
-+                                        "gpio46", "gpio47" };
-+static const char *const phy1_led0_groups[] = { "gpio33", "gpio34",
-+                                              "gpio35", "gpio42" };
-+static const char *const phy2_led0_groups[] = { "gpio33", "gpio34",
-+                                              "gpio35", "gpio42" };
-+static const char *const phy3_led0_groups[] = { "gpio33", "gpio34",
-+                                              "gpio35", "gpio42" };
-+static const char *const phy4_led0_groups[] = { "gpio33", "gpio34",
-+                                              "gpio35", "gpio42" };
-+static const char *const phy1_led1_groups[] = { "gpio43", "gpio44",
-+                                              "gpio45", "gpio46" };
-+static const char *const phy2_led1_groups[] = { "gpio43", "gpio44",
-+                                              "gpio45", "gpio46" };
-+static const char *const phy3_led1_groups[] = { "gpio43", "gpio44",
-+                                              "gpio45", "gpio46" };
-+static const char *const phy4_led1_groups[] = { "gpio43", "gpio44",
-+                                              "gpio45", "gpio46" };
-+
-+static const struct airoha_pinctrl_func_group pon_func_group[] = {
-+      {
-+              .name = "pon",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_PON_MODE_MASK,
-+                      GPIO_PON_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group tod_1pps_func_group[] = {
-+      {
-+              .name = "pon_tod_1pps",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      PON_TOD_1PPS_MODE_MASK,
-+                      PON_TOD_1PPS_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gsw_tod_1pps",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GSW_TOD_1PPS_MODE_MASK,
-+                      GSW_TOD_1PPS_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group sipo_func_group[] = {
-+      {
-+              .name = "sipo",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK,
-+                      GPIO_SIPO_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "sipo_rclk",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK,
-+                      GPIO_SIPO_MODE_MASK | SIPO_RCLK_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group mdio_func_group[] = {
-+      {
-+              .name = "mdio",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_SGMII_MDIO_MODE_MASK,
-+                      GPIO_SGMII_MDIO_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_MDC_IO_MASTER_MODE_MODE,
-+                      GPIO_MDC_IO_MASTER_MODE_MODE
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group uart_func_group[] = {
-+      {
-+              .name = "uart2",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_UART2_MODE_MASK,
-+                      GPIO_UART2_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "uart2_cts_rts",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_UART2_MODE_MASK | GPIO_UART2_CTS_RTS_MODE_MASK,
-+                      GPIO_UART2_MODE_MASK | GPIO_UART2_CTS_RTS_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "hsuart",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK,
-+                      GPIO_HSUART_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+      {
-+              .name = "hsuart_cts_rts",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK,
-+                      GPIO_HSUART_MODE_MASK | GPIO_HSUART_CTS_RTS_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "uart4",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_UART4_MODE_MASK,
-+                      GPIO_UART4_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "uart5",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_UART5_MODE_MASK,
-+                      GPIO_UART5_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group i2c_func_group[] = {
-+      {
-+              .name = "i2c1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_2ND_I2C_MODE_MASK,
-+                      GPIO_2ND_I2C_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group jtag_func_group[] = {
-+      {
-+              .name = "jtag_udi",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_NPU_UART_EN,
-+                      JTAG_UDI_EN_MASK,
-+                      JTAG_UDI_EN_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "jtag_dfd",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_NPU_UART_EN,
-+                      JTAG_DFD_EN_MASK,
-+                      JTAG_DFD_EN_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group pcm_func_group[] = {
-+      {
-+              .name = "pcm1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM1_MODE_MASK,
-+                      GPIO_PCM1_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm2",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM2_MODE_MASK,
-+                      GPIO_PCM2_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group spi_func_group[] = {
-+      {
-+              .name = "spi_quad",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_SPI_QUAD_MODE_MASK,
-+                      GPIO_SPI_QUAD_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "spi_cs1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_SPI_CS1_MODE_MASK,
-+                      GPIO_SPI_CS1_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "spi_cs2",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_SPI_CS2_MODE_MASK,
-+                      GPIO_SPI_CS2_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "spi_cs3",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_SPI_CS3_MODE_MASK,
-+                      GPIO_SPI_CS3_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "spi_cs4",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_SPI_CS4_MODE_MASK,
-+                      GPIO_SPI_CS4_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group pcm_spi_func_group[] = {
-+      {
-+              .name = "pcm_spi",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_MODE_MASK,
-+                      GPIO_PCM_SPI_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_int",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_INT_MODE_MASK,
-+                      GPIO_PCM_INT_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_rst",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_RESET_MODE_MASK,
-+                      GPIO_PCM_RESET_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_cs1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_CS1_MODE_MASK,
-+                      GPIO_PCM_SPI_CS1_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_cs2_p128",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_CS2_MODE_P128_MASK,
-+                      GPIO_PCM_SPI_CS2_MODE_P128_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_cs2_p156",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_CS2_MODE_P156_MASK,
-+                      GPIO_PCM_SPI_CS2_MODE_P156_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_cs3",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_CS3_MODE_MASK,
-+                      GPIO_PCM_SPI_CS3_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcm_spi_cs4",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_SPI_CS1_MODE,
-+                      GPIO_PCM_SPI_CS4_MODE_MASK,
-+                      GPIO_PCM_SPI_CS4_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group i2s_func_group[] = {
-+      {
-+              .name = "i2s",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_I2S_MODE_MASK,
-+                      GPIO_I2S_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group emmc_func_group[] = {
-+      {
-+              .name = "emmc",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_EMMC_MODE_MASK,
-+                      GPIO_EMMC_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group pnand_func_group[] = {
-+      {
-+              .name = "pnand",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_PARALLEL_NAND_MODE_MASK,
-+                      GPIO_PARALLEL_NAND_MODE_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group pcie_reset_func_group[] = {
-+      {
-+              .name = "pcie_reset0",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_PCIE_RESET0_MASK,
-+                      GPIO_PCIE_RESET0_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcie_reset1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_PCIE_RESET1_MASK,
-+                      GPIO_PCIE_RESET1_MASK
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "pcie_reset2",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_PON_MODE,
-+                      GPIO_PCIE_RESET2_MASK,
-+                      GPIO_PCIE_RESET2_MASK
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+/* PWM */
-+static const struct airoha_pinctrl_func_group pwm_func_group[] = {
-+      {
-+              .name = "gpio0",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO0_FLASH_MODE_CFG,
-+                      GPIO0_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio1",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO1_FLASH_MODE_CFG,
-+                      GPIO1_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio2",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO2_FLASH_MODE_CFG,
-+                      GPIO2_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio3",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO3_FLASH_MODE_CFG,
-+                      GPIO3_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio4",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO4_FLASH_MODE_CFG,
-+                      GPIO4_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio5",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO5_FLASH_MODE_CFG,
-+                      GPIO5_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio6",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO6_FLASH_MODE_CFG,
-+                      GPIO6_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio7",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO7_FLASH_MODE_CFG,
-+                      GPIO7_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio8",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO8_FLASH_MODE_CFG,
-+                      GPIO8_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio9",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO9_FLASH_MODE_CFG,
-+                      GPIO9_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio10",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO10_FLASH_MODE_CFG,
-+                      GPIO10_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio11",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO11_FLASH_MODE_CFG,
-+                      GPIO11_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio12",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO12_FLASH_MODE_CFG,
-+                      GPIO12_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio13",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO13_FLASH_MODE_CFG,
-+                      GPIO13_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio14",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO14_FLASH_MODE_CFG,
-+                      GPIO14_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio15",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG,
-+                      GPIO15_FLASH_MODE_CFG,
-+                      GPIO15_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio16",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO16_FLASH_MODE_CFG,
-+                      GPIO16_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio17",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO17_FLASH_MODE_CFG,
-+                      GPIO17_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio18",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO18_FLASH_MODE_CFG,
-+                      GPIO18_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio19",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO19_FLASH_MODE_CFG,
-+                      GPIO19_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio20",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO20_FLASH_MODE_CFG,
-+                      GPIO20_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio21",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO21_FLASH_MODE_CFG,
-+                      GPIO21_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio22",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO22_FLASH_MODE_CFG,
-+                      GPIO22_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio23",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO23_FLASH_MODE_CFG,
-+                      GPIO23_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio24",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO24_FLASH_MODE_CFG,
-+                      GPIO24_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio25",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO25_FLASH_MODE_CFG,
-+                      GPIO25_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio26",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO26_FLASH_MODE_CFG,
-+                      GPIO26_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio27",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO27_FLASH_MODE_CFG,
-+                      GPIO27_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio28",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO28_FLASH_MODE_CFG,
-+                      GPIO28_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio29",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO29_FLASH_MODE_CFG,
-+                      GPIO29_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio30",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO30_FLASH_MODE_CFG,
-+                      GPIO30_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio31",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO31_FLASH_MODE_CFG,
-+                      GPIO31_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio36",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO36_FLASH_MODE_CFG,
-+                      GPIO36_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio37",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO37_FLASH_MODE_CFG,
-+                      GPIO37_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio38",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO38_FLASH_MODE_CFG,
-+                      GPIO38_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio39",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO39_FLASH_MODE_CFG,
-+                      GPIO39_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio40",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO40_FLASH_MODE_CFG,
-+                      GPIO40_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio41",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO41_FLASH_MODE_CFG,
-+                      GPIO41_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio42",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO42_FLASH_MODE_CFG,
-+                      GPIO42_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio43",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO43_FLASH_MODE_CFG,
-+                      GPIO43_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio44",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO44_FLASH_MODE_CFG,
-+                      GPIO44_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio45",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO45_FLASH_MODE_CFG,
-+                      GPIO45_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio46",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO46_FLASH_MODE_CFG,
-+                      GPIO46_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      }, {
-+              .name = "gpio47",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_PWM_EXT_MUX,
-+                      REG_GPIO_FLASH_MODE_CFG_EXT,
-+                      GPIO47_FLASH_MODE_CFG,
-+                      GPIO47_FLASH_MODE_CFG
-+              },
-+              .regmap_size = 1,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
-+      {
-+              .name = "gpio33",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED0_MODE_MASK,
-+                      GPIO_LAN0_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio34",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED0_MODE_MASK,
-+                      GPIO_LAN1_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio35",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED0_MODE_MASK,
-+                      GPIO_LAN2_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio42",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
-+      {
-+              .name = "gpio33",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED0_MODE_MASK,
-+                      GPIO_LAN0_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio34",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED0_MODE_MASK,
-+                      GPIO_LAN1_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio35",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED0_MODE_MASK,
-+                      GPIO_LAN2_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio42",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
-+      {
-+              .name = "gpio33",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED0_MODE_MASK,
-+                      GPIO_LAN0_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio34",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED0_MODE_MASK,
-+                      GPIO_LAN1_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio35",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED0_MODE_MASK,
-+                      GPIO_LAN2_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio42",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
-+      {
-+              .name = "gpio33",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED0_MODE_MASK,
-+                      GPIO_LAN0_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio34",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED0_MODE_MASK,
-+                      GPIO_LAN1_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio35",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED0_MODE_MASK,
-+                      GPIO_LAN2_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio42",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED0_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
-+      {
-+              .name = "gpio43",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED1_MODE_MASK,
-+                      GPIO_LAN0_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio44",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED1_MODE_MASK,
-+                      GPIO_LAN1_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio45",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED1_MODE_MASK,
-+                      GPIO_LAN2_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio46",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY1_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
-+      {
-+              .name = "gpio43",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED1_MODE_MASK,
-+                      GPIO_LAN0_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio44",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED1_MODE_MASK,
-+                      GPIO_LAN1_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio45",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED1_MODE_MASK,
-+                      GPIO_LAN2_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio46",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY2_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
-+      {
-+              .name = "gpio43",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED1_MODE_MASK,
-+                      GPIO_LAN0_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio44",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED1_MODE_MASK,
-+                      GPIO_LAN1_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio45",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED1_MODE_MASK,
-+                      GPIO_LAN2_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio46",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY3_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
-+      {
-+              .name = "gpio43",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN0_LED1_MODE_MASK,
-+                      GPIO_LAN0_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio44",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN1_LED1_MODE_MASK,
-+                      GPIO_LAN1_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio45",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN2_LED1_MODE_MASK,
-+                      GPIO_LAN2_LED1_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      }, {
-+              .name = "gpio46",
-+              .regmap[0] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_GPIO_2ND_I2C_MODE,
-+                      GPIO_LAN3_LED0_MODE_MASK,
-+                      GPIO_LAN3_LED0_MODE_MASK
-+              },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_LAN_LED1_MAPPING,
-+                      LAN4_LED_MAPPING_MASK,
-+                      LAN4_PHY4_LED_MAP
-+              },
-+              .regmap_size = 2,
-+      },
-+};
-+
-+static const struct airoha_pinctrl_func airoha_pinctrl_funcs[] = {
-+      PINCTRL_FUNC_DESC(pon),
-+      PINCTRL_FUNC_DESC(tod_1pps),
-+      PINCTRL_FUNC_DESC(sipo),
-+      PINCTRL_FUNC_DESC(mdio),
-+      PINCTRL_FUNC_DESC(uart),
-+      PINCTRL_FUNC_DESC(i2c),
-+      PINCTRL_FUNC_DESC(jtag),
-+      PINCTRL_FUNC_DESC(pcm),
-+      PINCTRL_FUNC_DESC(spi),
-+      PINCTRL_FUNC_DESC(pcm_spi),
-+      PINCTRL_FUNC_DESC(i2s),
-+      PINCTRL_FUNC_DESC(emmc),
-+      PINCTRL_FUNC_DESC(pnand),
-+      PINCTRL_FUNC_DESC(pcie_reset),
-+      PINCTRL_FUNC_DESC(pwm),
-+      PINCTRL_FUNC_DESC(phy1_led0),
-+      PINCTRL_FUNC_DESC(phy2_led0),
-+      PINCTRL_FUNC_DESC(phy3_led0),
-+      PINCTRL_FUNC_DESC(phy4_led0),
-+      PINCTRL_FUNC_DESC(phy1_led1),
-+      PINCTRL_FUNC_DESC(phy2_led1),
-+      PINCTRL_FUNC_DESC(phy3_led1),
-+      PINCTRL_FUNC_DESC(phy4_led1),
-+};
-+
-+static const struct airoha_pinctrl_conf airoha_pinctrl_pullup_conf[] = {
-+      PINCTRL_CONF_DESC(0, REG_I2C_SDA_PU, UART1_TXD_PU_MASK),
-+      PINCTRL_CONF_DESC(1, REG_I2C_SDA_PU, UART1_RXD_PU_MASK),
-+      PINCTRL_CONF_DESC(2, REG_I2C_SDA_PU, I2C_SDA_PU_MASK),
-+      PINCTRL_CONF_DESC(3, REG_I2C_SDA_PU, I2C_SCL_PU_MASK),
-+      PINCTRL_CONF_DESC(4, REG_I2C_SDA_PU, SPI_CS0_PU_MASK),
-+      PINCTRL_CONF_DESC(5, REG_I2C_SDA_PU, SPI_CLK_PU_MASK),
-+      PINCTRL_CONF_DESC(6, REG_I2C_SDA_PU, SPI_MOSI_PU_MASK),
-+      PINCTRL_CONF_DESC(7, REG_I2C_SDA_PU, SPI_MISO_PU_MASK),
-+      PINCTRL_CONF_DESC(13, REG_GPIO_L_PU, BIT(0)),
-+      PINCTRL_CONF_DESC(14, REG_GPIO_L_PU, BIT(1)),
-+      PINCTRL_CONF_DESC(15, REG_GPIO_L_PU, BIT(2)),
-+      PINCTRL_CONF_DESC(16, REG_GPIO_L_PU, BIT(3)),
-+      PINCTRL_CONF_DESC(17, REG_GPIO_L_PU, BIT(4)),
-+      PINCTRL_CONF_DESC(18, REG_GPIO_L_PU, BIT(5)),
-+      PINCTRL_CONF_DESC(19, REG_GPIO_L_PU, BIT(6)),
-+      PINCTRL_CONF_DESC(20, REG_GPIO_L_PU, BIT(7)),
-+      PINCTRL_CONF_DESC(21, REG_GPIO_L_PU, BIT(8)),
-+      PINCTRL_CONF_DESC(22, REG_GPIO_L_PU, BIT(9)),
-+      PINCTRL_CONF_DESC(23, REG_GPIO_L_PU, BIT(10)),
-+      PINCTRL_CONF_DESC(24, REG_GPIO_L_PU, BIT(11)),
-+      PINCTRL_CONF_DESC(25, REG_GPIO_L_PU, BIT(12)),
-+      PINCTRL_CONF_DESC(26, REG_GPIO_L_PU, BIT(13)),
-+      PINCTRL_CONF_DESC(27, REG_GPIO_L_PU, BIT(14)),
-+      PINCTRL_CONF_DESC(28, REG_GPIO_L_PU, BIT(15)),
-+      PINCTRL_CONF_DESC(29, REG_GPIO_L_PU, BIT(16)),
-+      PINCTRL_CONF_DESC(30, REG_GPIO_L_PU, BIT(17)),
-+      PINCTRL_CONF_DESC(31, REG_GPIO_L_PU, BIT(18)),
-+      PINCTRL_CONF_DESC(32, REG_GPIO_L_PU, BIT(19)),
-+      PINCTRL_CONF_DESC(33, REG_GPIO_L_PU, BIT(20)),
-+      PINCTRL_CONF_DESC(34, REG_GPIO_L_PU, BIT(21)),
-+      PINCTRL_CONF_DESC(35, REG_GPIO_L_PU, BIT(22)),
-+      PINCTRL_CONF_DESC(36, REG_GPIO_L_PU, BIT(23)),
-+      PINCTRL_CONF_DESC(37, REG_GPIO_L_PU, BIT(24)),
-+      PINCTRL_CONF_DESC(38, REG_GPIO_L_PU, BIT(25)),
-+      PINCTRL_CONF_DESC(39, REG_GPIO_L_PU, BIT(26)),
-+      PINCTRL_CONF_DESC(40, REG_GPIO_L_PU, BIT(27)),
-+      PINCTRL_CONF_DESC(41, REG_GPIO_L_PU, BIT(28)),
-+      PINCTRL_CONF_DESC(42, REG_GPIO_L_PU, BIT(29)),
-+      PINCTRL_CONF_DESC(43, REG_GPIO_L_PU, BIT(30)),
-+      PINCTRL_CONF_DESC(44, REG_GPIO_L_PU, BIT(31)),
-+      PINCTRL_CONF_DESC(45, REG_GPIO_H_PU, BIT(0)),
-+      PINCTRL_CONF_DESC(46, REG_GPIO_H_PU, BIT(1)),
-+      PINCTRL_CONF_DESC(47, REG_GPIO_H_PU, BIT(2)),
-+      PINCTRL_CONF_DESC(48, REG_GPIO_H_PU, BIT(3)),
-+      PINCTRL_CONF_DESC(49, REG_GPIO_H_PU, BIT(4)),
-+      PINCTRL_CONF_DESC(50, REG_GPIO_H_PU, BIT(5)),
-+      PINCTRL_CONF_DESC(51, REG_GPIO_H_PU, BIT(6)),
-+      PINCTRL_CONF_DESC(52, REG_GPIO_H_PU, BIT(7)),
-+      PINCTRL_CONF_DESC(53, REG_GPIO_H_PU, BIT(8)),
-+      PINCTRL_CONF_DESC(54, REG_GPIO_H_PU, BIT(9)),
-+      PINCTRL_CONF_DESC(55, REG_GPIO_H_PU, BIT(10)),
-+      PINCTRL_CONF_DESC(56, REG_GPIO_H_PU, BIT(11)),
-+      PINCTRL_CONF_DESC(57, REG_GPIO_H_PU, BIT(12)),
-+      PINCTRL_CONF_DESC(58, REG_GPIO_H_PU, BIT(13)),
-+      PINCTRL_CONF_DESC(59, REG_GPIO_H_PU, BIT(14)),
-+      PINCTRL_CONF_DESC(61, REG_I2C_SDA_PU, PCIE0_RESET_PU_MASK),
-+      PINCTRL_CONF_DESC(62, REG_I2C_SDA_PU, PCIE1_RESET_PU_MASK),
-+      PINCTRL_CONF_DESC(63, REG_I2C_SDA_PU, PCIE2_RESET_PU_MASK),
-+};
-+
-+static const struct airoha_pinctrl_conf airoha_pinctrl_pulldown_conf[] = {
-+      PINCTRL_CONF_DESC(0, REG_I2C_SDA_PD, UART1_TXD_PD_MASK),
-+      PINCTRL_CONF_DESC(1, REG_I2C_SDA_PD, UART1_RXD_PD_MASK),
-+      PINCTRL_CONF_DESC(2, REG_I2C_SDA_PD, I2C_SDA_PD_MASK),
-+      PINCTRL_CONF_DESC(3, REG_I2C_SDA_PD, I2C_SCL_PD_MASK),
-+      PINCTRL_CONF_DESC(4, REG_I2C_SDA_PD, SPI_CS0_PD_MASK),
-+      PINCTRL_CONF_DESC(5, REG_I2C_SDA_PD, SPI_CLK_PD_MASK),
-+      PINCTRL_CONF_DESC(6, REG_I2C_SDA_PD, SPI_MOSI_PD_MASK),
-+      PINCTRL_CONF_DESC(7, REG_I2C_SDA_PD, SPI_MISO_PD_MASK),
-+      PINCTRL_CONF_DESC(13, REG_GPIO_L_PD, BIT(0)),
-+      PINCTRL_CONF_DESC(14, REG_GPIO_L_PD, BIT(1)),
-+      PINCTRL_CONF_DESC(15, REG_GPIO_L_PD, BIT(2)),
-+      PINCTRL_CONF_DESC(16, REG_GPIO_L_PD, BIT(3)),
-+      PINCTRL_CONF_DESC(17, REG_GPIO_L_PD, BIT(4)),
-+      PINCTRL_CONF_DESC(18, REG_GPIO_L_PD, BIT(5)),
-+      PINCTRL_CONF_DESC(19, REG_GPIO_L_PD, BIT(6)),
-+      PINCTRL_CONF_DESC(20, REG_GPIO_L_PD, BIT(7)),
-+      PINCTRL_CONF_DESC(21, REG_GPIO_L_PD, BIT(8)),
-+      PINCTRL_CONF_DESC(22, REG_GPIO_L_PD, BIT(9)),
-+      PINCTRL_CONF_DESC(23, REG_GPIO_L_PD, BIT(10)),
-+      PINCTRL_CONF_DESC(24, REG_GPIO_L_PD, BIT(11)),
-+      PINCTRL_CONF_DESC(25, REG_GPIO_L_PD, BIT(12)),
-+      PINCTRL_CONF_DESC(26, REG_GPIO_L_PD, BIT(13)),
-+      PINCTRL_CONF_DESC(27, REG_GPIO_L_PD, BIT(14)),
-+      PINCTRL_CONF_DESC(28, REG_GPIO_L_PD, BIT(15)),
-+      PINCTRL_CONF_DESC(29, REG_GPIO_L_PD, BIT(16)),
-+      PINCTRL_CONF_DESC(30, REG_GPIO_L_PD, BIT(17)),
-+      PINCTRL_CONF_DESC(31, REG_GPIO_L_PD, BIT(18)),
-+      PINCTRL_CONF_DESC(32, REG_GPIO_L_PD, BIT(18)),
-+      PINCTRL_CONF_DESC(33, REG_GPIO_L_PD, BIT(20)),
-+      PINCTRL_CONF_DESC(34, REG_GPIO_L_PD, BIT(21)),
-+      PINCTRL_CONF_DESC(35, REG_GPIO_L_PD, BIT(22)),
-+      PINCTRL_CONF_DESC(36, REG_GPIO_L_PD, BIT(23)),
-+      PINCTRL_CONF_DESC(37, REG_GPIO_L_PD, BIT(24)),
-+      PINCTRL_CONF_DESC(38, REG_GPIO_L_PD, BIT(25)),
-+      PINCTRL_CONF_DESC(39, REG_GPIO_L_PD, BIT(26)),
-+      PINCTRL_CONF_DESC(40, REG_GPIO_L_PD, BIT(27)),
-+      PINCTRL_CONF_DESC(41, REG_GPIO_L_PD, BIT(28)),
-+      PINCTRL_CONF_DESC(42, REG_GPIO_L_PD, BIT(29)),
-+      PINCTRL_CONF_DESC(43, REG_GPIO_L_PD, BIT(30)),
-+      PINCTRL_CONF_DESC(44, REG_GPIO_L_PD, BIT(31)),
-+      PINCTRL_CONF_DESC(45, REG_GPIO_H_PD, BIT(0)),
-+      PINCTRL_CONF_DESC(46, REG_GPIO_H_PD, BIT(1)),
-+      PINCTRL_CONF_DESC(47, REG_GPIO_H_PD, BIT(2)),
-+      PINCTRL_CONF_DESC(48, REG_GPIO_H_PD, BIT(3)),
-+      PINCTRL_CONF_DESC(49, REG_GPIO_H_PD, BIT(4)),
-+      PINCTRL_CONF_DESC(50, REG_GPIO_H_PD, BIT(5)),
-+      PINCTRL_CONF_DESC(51, REG_GPIO_H_PD, BIT(6)),
-+      PINCTRL_CONF_DESC(52, REG_GPIO_H_PD, BIT(7)),
-+      PINCTRL_CONF_DESC(53, REG_GPIO_H_PD, BIT(8)),
-+      PINCTRL_CONF_DESC(54, REG_GPIO_H_PD, BIT(9)),
-+      PINCTRL_CONF_DESC(55, REG_GPIO_H_PD, BIT(10)),
-+      PINCTRL_CONF_DESC(56, REG_GPIO_H_PD, BIT(11)),
-+      PINCTRL_CONF_DESC(57, REG_GPIO_H_PD, BIT(12)),
-+      PINCTRL_CONF_DESC(58, REG_GPIO_H_PD, BIT(13)),
-+      PINCTRL_CONF_DESC(59, REG_GPIO_H_PD, BIT(14)),
-+      PINCTRL_CONF_DESC(61, REG_I2C_SDA_PD, PCIE0_RESET_PD_MASK),
-+      PINCTRL_CONF_DESC(62, REG_I2C_SDA_PD, PCIE1_RESET_PD_MASK),
-+      PINCTRL_CONF_DESC(63, REG_I2C_SDA_PD, PCIE2_RESET_PD_MASK),
-+};
-+
-+static const struct airoha_pinctrl_conf airoha_pinctrl_drive_e2_conf[] = {
-+      PINCTRL_CONF_DESC(0, REG_I2C_SDA_E2, UART1_TXD_E2_MASK),
-+      PINCTRL_CONF_DESC(1, REG_I2C_SDA_E2, UART1_RXD_E2_MASK),
-+      PINCTRL_CONF_DESC(2, REG_I2C_SDA_E2, I2C_SDA_E2_MASK),
-+      PINCTRL_CONF_DESC(3, REG_I2C_SDA_E2, I2C_SCL_E2_MASK),
-+      PINCTRL_CONF_DESC(4, REG_I2C_SDA_E2, SPI_CS0_E2_MASK),
-+      PINCTRL_CONF_DESC(5, REG_I2C_SDA_E2, SPI_CLK_E2_MASK),
-+      PINCTRL_CONF_DESC(6, REG_I2C_SDA_E2, SPI_MOSI_E2_MASK),
-+      PINCTRL_CONF_DESC(7, REG_I2C_SDA_E2, SPI_MISO_E2_MASK),
-+      PINCTRL_CONF_DESC(13, REG_GPIO_L_E2, BIT(0)),
-+      PINCTRL_CONF_DESC(14, REG_GPIO_L_E2, BIT(1)),
-+      PINCTRL_CONF_DESC(15, REG_GPIO_L_E2, BIT(2)),
-+      PINCTRL_CONF_DESC(16, REG_GPIO_L_E2, BIT(3)),
-+      PINCTRL_CONF_DESC(17, REG_GPIO_L_E2, BIT(4)),
-+      PINCTRL_CONF_DESC(18, REG_GPIO_L_E2, BIT(5)),
-+      PINCTRL_CONF_DESC(19, REG_GPIO_L_E2, BIT(6)),
-+      PINCTRL_CONF_DESC(20, REG_GPIO_L_E2, BIT(7)),
-+      PINCTRL_CONF_DESC(21, REG_GPIO_L_E2, BIT(8)),
-+      PINCTRL_CONF_DESC(22, REG_GPIO_L_E2, BIT(9)),
-+      PINCTRL_CONF_DESC(23, REG_GPIO_L_E2, BIT(10)),
-+      PINCTRL_CONF_DESC(24, REG_GPIO_L_E2, BIT(11)),
-+      PINCTRL_CONF_DESC(25, REG_GPIO_L_E2, BIT(12)),
-+      PINCTRL_CONF_DESC(26, REG_GPIO_L_E2, BIT(13)),
-+      PINCTRL_CONF_DESC(27, REG_GPIO_L_E2, BIT(14)),
-+      PINCTRL_CONF_DESC(28, REG_GPIO_L_E2, BIT(15)),
-+      PINCTRL_CONF_DESC(29, REG_GPIO_L_E2, BIT(16)),
-+      PINCTRL_CONF_DESC(30, REG_GPIO_L_E2, BIT(17)),
-+      PINCTRL_CONF_DESC(31, REG_GPIO_L_E2, BIT(18)),
-+      PINCTRL_CONF_DESC(32, REG_GPIO_L_E2, BIT(18)),
-+      PINCTRL_CONF_DESC(33, REG_GPIO_L_E2, BIT(20)),
-+      PINCTRL_CONF_DESC(34, REG_GPIO_L_E2, BIT(21)),
-+      PINCTRL_CONF_DESC(35, REG_GPIO_L_E2, BIT(22)),
-+      PINCTRL_CONF_DESC(36, REG_GPIO_L_E2, BIT(23)),
-+      PINCTRL_CONF_DESC(37, REG_GPIO_L_E2, BIT(24)),
-+      PINCTRL_CONF_DESC(38, REG_GPIO_L_E2, BIT(25)),
-+      PINCTRL_CONF_DESC(39, REG_GPIO_L_E2, BIT(26)),
-+      PINCTRL_CONF_DESC(40, REG_GPIO_L_E2, BIT(27)),
-+      PINCTRL_CONF_DESC(41, REG_GPIO_L_E2, BIT(28)),
-+      PINCTRL_CONF_DESC(42, REG_GPIO_L_E2, BIT(29)),
-+      PINCTRL_CONF_DESC(43, REG_GPIO_L_E2, BIT(30)),
-+      PINCTRL_CONF_DESC(44, REG_GPIO_L_E2, BIT(31)),
-+      PINCTRL_CONF_DESC(45, REG_GPIO_H_E2, BIT(0)),
-+      PINCTRL_CONF_DESC(46, REG_GPIO_H_E2, BIT(1)),
-+      PINCTRL_CONF_DESC(47, REG_GPIO_H_E2, BIT(2)),
-+      PINCTRL_CONF_DESC(48, REG_GPIO_H_E2, BIT(3)),
-+      PINCTRL_CONF_DESC(49, REG_GPIO_H_E2, BIT(4)),
-+      PINCTRL_CONF_DESC(50, REG_GPIO_H_E2, BIT(5)),
-+      PINCTRL_CONF_DESC(51, REG_GPIO_H_E2, BIT(6)),
-+      PINCTRL_CONF_DESC(52, REG_GPIO_H_E2, BIT(7)),
-+      PINCTRL_CONF_DESC(53, REG_GPIO_H_E2, BIT(8)),
-+      PINCTRL_CONF_DESC(54, REG_GPIO_H_E2, BIT(9)),
-+      PINCTRL_CONF_DESC(55, REG_GPIO_H_E2, BIT(10)),
-+      PINCTRL_CONF_DESC(56, REG_GPIO_H_E2, BIT(11)),
-+      PINCTRL_CONF_DESC(57, REG_GPIO_H_E2, BIT(12)),
-+      PINCTRL_CONF_DESC(58, REG_GPIO_H_E2, BIT(13)),
-+      PINCTRL_CONF_DESC(59, REG_GPIO_H_E2, BIT(14)),
-+      PINCTRL_CONF_DESC(61, REG_I2C_SDA_E2, PCIE0_RESET_E2_MASK),
-+      PINCTRL_CONF_DESC(62, REG_I2C_SDA_E2, PCIE1_RESET_E2_MASK),
-+      PINCTRL_CONF_DESC(63, REG_I2C_SDA_E2, PCIE2_RESET_E2_MASK),
-+};
-+
-+static const struct airoha_pinctrl_conf airoha_pinctrl_drive_e4_conf[] = {
-+      PINCTRL_CONF_DESC(0, REG_I2C_SDA_E4, UART1_TXD_E4_MASK),
-+      PINCTRL_CONF_DESC(1, REG_I2C_SDA_E4, UART1_RXD_E4_MASK),
-+      PINCTRL_CONF_DESC(2, REG_I2C_SDA_E4, I2C_SDA_E4_MASK),
-+      PINCTRL_CONF_DESC(3, REG_I2C_SDA_E4, I2C_SCL_E4_MASK),
-+      PINCTRL_CONF_DESC(4, REG_I2C_SDA_E4, SPI_CS0_E4_MASK),
-+      PINCTRL_CONF_DESC(5, REG_I2C_SDA_E4, SPI_CLK_E4_MASK),
-+      PINCTRL_CONF_DESC(6, REG_I2C_SDA_E4, SPI_MOSI_E4_MASK),
-+      PINCTRL_CONF_DESC(7, REG_I2C_SDA_E4, SPI_MISO_E4_MASK),
-+      PINCTRL_CONF_DESC(13, REG_GPIO_L_E4, BIT(0)),
-+      PINCTRL_CONF_DESC(14, REG_GPIO_L_E4, BIT(1)),
-+      PINCTRL_CONF_DESC(15, REG_GPIO_L_E4, BIT(2)),
-+      PINCTRL_CONF_DESC(16, REG_GPIO_L_E4, BIT(3)),
-+      PINCTRL_CONF_DESC(17, REG_GPIO_L_E4, BIT(4)),
-+      PINCTRL_CONF_DESC(18, REG_GPIO_L_E4, BIT(5)),
-+      PINCTRL_CONF_DESC(19, REG_GPIO_L_E4, BIT(6)),
-+      PINCTRL_CONF_DESC(20, REG_GPIO_L_E4, BIT(7)),
-+      PINCTRL_CONF_DESC(21, REG_GPIO_L_E4, BIT(8)),
-+      PINCTRL_CONF_DESC(22, REG_GPIO_L_E4, BIT(9)),
-+      PINCTRL_CONF_DESC(23, REG_GPIO_L_E4, BIT(10)),
-+      PINCTRL_CONF_DESC(24, REG_GPIO_L_E4, BIT(11)),
-+      PINCTRL_CONF_DESC(25, REG_GPIO_L_E4, BIT(12)),
-+      PINCTRL_CONF_DESC(26, REG_GPIO_L_E4, BIT(13)),
-+      PINCTRL_CONF_DESC(27, REG_GPIO_L_E4, BIT(14)),
-+      PINCTRL_CONF_DESC(28, REG_GPIO_L_E4, BIT(15)),
-+      PINCTRL_CONF_DESC(29, REG_GPIO_L_E4, BIT(16)),
-+      PINCTRL_CONF_DESC(30, REG_GPIO_L_E4, BIT(17)),
-+      PINCTRL_CONF_DESC(31, REG_GPIO_L_E4, BIT(18)),
-+      PINCTRL_CONF_DESC(32, REG_GPIO_L_E4, BIT(18)),
-+      PINCTRL_CONF_DESC(33, REG_GPIO_L_E4, BIT(20)),
-+      PINCTRL_CONF_DESC(34, REG_GPIO_L_E4, BIT(21)),
-+      PINCTRL_CONF_DESC(35, REG_GPIO_L_E4, BIT(22)),
-+      PINCTRL_CONF_DESC(36, REG_GPIO_L_E4, BIT(23)),
-+      PINCTRL_CONF_DESC(37, REG_GPIO_L_E4, BIT(24)),
-+      PINCTRL_CONF_DESC(38, REG_GPIO_L_E4, BIT(25)),
-+      PINCTRL_CONF_DESC(39, REG_GPIO_L_E4, BIT(26)),
-+      PINCTRL_CONF_DESC(40, REG_GPIO_L_E4, BIT(27)),
-+      PINCTRL_CONF_DESC(41, REG_GPIO_L_E4, BIT(28)),
-+      PINCTRL_CONF_DESC(42, REG_GPIO_L_E4, BIT(29)),
-+      PINCTRL_CONF_DESC(43, REG_GPIO_L_E4, BIT(30)),
-+      PINCTRL_CONF_DESC(44, REG_GPIO_L_E4, BIT(31)),
-+      PINCTRL_CONF_DESC(45, REG_GPIO_H_E4, BIT(0)),
-+      PINCTRL_CONF_DESC(46, REG_GPIO_H_E4, BIT(1)),
-+      PINCTRL_CONF_DESC(47, REG_GPIO_H_E4, BIT(2)),
-+      PINCTRL_CONF_DESC(48, REG_GPIO_H_E4, BIT(3)),
-+      PINCTRL_CONF_DESC(49, REG_GPIO_H_E4, BIT(4)),
-+      PINCTRL_CONF_DESC(50, REG_GPIO_H_E4, BIT(5)),
-+      PINCTRL_CONF_DESC(51, REG_GPIO_H_E4, BIT(6)),
-+      PINCTRL_CONF_DESC(52, REG_GPIO_H_E4, BIT(7)),
-+      PINCTRL_CONF_DESC(53, REG_GPIO_H_E4, BIT(8)),
-+      PINCTRL_CONF_DESC(54, REG_GPIO_H_E4, BIT(9)),
-+      PINCTRL_CONF_DESC(55, REG_GPIO_H_E4, BIT(10)),
-+      PINCTRL_CONF_DESC(56, REG_GPIO_H_E4, BIT(11)),
-+      PINCTRL_CONF_DESC(57, REG_GPIO_H_E4, BIT(12)),
-+      PINCTRL_CONF_DESC(58, REG_GPIO_H_E4, BIT(13)),
-+      PINCTRL_CONF_DESC(59, REG_GPIO_H_E4, BIT(14)),
-+      PINCTRL_CONF_DESC(61, REG_I2C_SDA_E4, PCIE0_RESET_E4_MASK),
-+      PINCTRL_CONF_DESC(62, REG_I2C_SDA_E4, PCIE1_RESET_E4_MASK),
-+      PINCTRL_CONF_DESC(63, REG_I2C_SDA_E4, PCIE2_RESET_E4_MASK),
-+};
-+
-+static const struct airoha_pinctrl_conf airoha_pinctrl_pcie_rst_od_conf[] = {
-+      PINCTRL_CONF_DESC(61, REG_PCIE_RESET_OD, PCIE0_RESET_OD_MASK),
-+      PINCTRL_CONF_DESC(62, REG_PCIE_RESET_OD, PCIE1_RESET_OD_MASK),
-+      PINCTRL_CONF_DESC(63, REG_PCIE_RESET_OD, PCIE2_RESET_OD_MASK),
-+};
-+
-+static int airoha_convert_pin_to_reg_offset(struct pinctrl_dev *pctrl_dev,
-+                                          struct pinctrl_gpio_range *range,
-+                                          int pin)
-+{
-+      if (!range)
-+              range = pinctrl_find_gpio_range_from_pin_nolock(pctrl_dev,
-+                                                              pin);
-+      if (!range)
-+              return -EINVAL;
-+
-+      return pin - range->pin_base;
-+}
-+
-+/* gpio callbacks */
-+static void airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio,
-+                          int value)
-+{
-+      struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip);
-+      u32 offset = gpio % AIROHA_PIN_BANK_SIZE;
-+      u8 index = gpio / AIROHA_PIN_BANK_SIZE;
-+
-+      regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.data[index],
-+                         BIT(offset), value ? BIT(offset) : 0);
-+}
-+
-+static int airoha_gpio_get(struct gpio_chip *chip, unsigned int gpio)
-+{
-+      struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip);
-+      u32 val, pin = gpio % AIROHA_PIN_BANK_SIZE;
-+      u8 index = gpio / AIROHA_PIN_BANK_SIZE;
-+      int err;
-+
-+      err = regmap_read(pinctrl->regmap,
-+                        pinctrl->gpiochip.data[index], &val);
-+
-+      return err ? err : !!(val & BIT(pin));
-+}
-+
-+static int airoha_gpio_direction_output(struct gpio_chip *chip,
-+                                      unsigned int gpio, int value)
-+{
-+      int err;
-+
-+      err = pinctrl_gpio_direction_output(chip->base + gpio);
-+      if (err)
-+              return err;
-+
-+      airoha_gpio_set(chip, gpio, value);
-+
-+      return 0;
-+}
-+
-+/* irq callbacks */
-+static void airoha_irq_unmask(struct irq_data *data)
-+{
-+      u8 offset = data->hwirq % AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      u8 index = data->hwirq / AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      u32 mask = GENMASK(2 * offset + 1, 2 * offset);
-+      struct airoha_pinctrl_gpiochip *gpiochip;
-+      struct airoha_pinctrl *pinctrl;
-+      u32 val = BIT(2 * offset);
-+
-+      gpiochip = irq_data_get_irq_chip_data(data);
-+      if (WARN_ON_ONCE(data->hwirq >= ARRAY_SIZE(gpiochip->irq_type)))
-+              return;
-+
-+      pinctrl = container_of(gpiochip, struct airoha_pinctrl, gpiochip);
-+      switch (gpiochip->irq_type[data->hwirq]) {
-+      case IRQ_TYPE_LEVEL_LOW:
-+              val = val << 1;
-+              fallthrough;
-+      case IRQ_TYPE_LEVEL_HIGH:
-+              regmap_update_bits(pinctrl->regmap, gpiochip->level[index],
-+                                 mask, val);
-+              break;
-+      case IRQ_TYPE_EDGE_FALLING:
-+              val = val << 1;
-+              fallthrough;
-+      case IRQ_TYPE_EDGE_RISING:
-+              regmap_update_bits(pinctrl->regmap, gpiochip->edge[index],
-+                                 mask, val);
-+              break;
-+      case IRQ_TYPE_EDGE_BOTH:
-+              regmap_set_bits(pinctrl->regmap, gpiochip->edge[index], mask);
-+              break;
-+      default:
-+              break;
-+      }
-+}
-+
-+static void airoha_irq_mask(struct irq_data *data)
-+{
-+      u8 offset = data->hwirq % AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      u8 index = data->hwirq / AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      u32 mask = GENMASK(2 * offset + 1, 2 * offset);
-+      struct airoha_pinctrl_gpiochip *gpiochip;
-+      struct airoha_pinctrl *pinctrl;
-+
-+      gpiochip = irq_data_get_irq_chip_data(data);
-+      pinctrl = container_of(gpiochip, struct airoha_pinctrl, gpiochip);
-+
-+      regmap_clear_bits(pinctrl->regmap, gpiochip->level[index], mask);
-+      regmap_clear_bits(pinctrl->regmap, gpiochip->edge[index], mask);
-+}
-+
-+static int airoha_irq_type(struct irq_data *data, unsigned int type)
-+{
-+      struct airoha_pinctrl_gpiochip *gpiochip;
-+
-+      gpiochip = irq_data_get_irq_chip_data(data);
-+      if (data->hwirq >= ARRAY_SIZE(gpiochip->irq_type))
-+              return -EINVAL;
-+
-+      if (type == IRQ_TYPE_PROBE) {
-+              if (gpiochip->irq_type[data->hwirq])
-+                      return 0;
-+
-+              type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
-+      }
-+      gpiochip->irq_type[data->hwirq] = type & IRQ_TYPE_SENSE_MASK;
-+
-+      return 0;
-+}
-+
-+static irqreturn_t airoha_irq_handler(int irq, void *data)
-+{
-+      struct airoha_pinctrl *pinctrl = data;
-+      bool handled = false;
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(irq_status_regs); i++) {
-+              struct gpio_irq_chip *girq = &pinctrl->gpiochip.chip.irq;
-+              u32 status;
-+              int irq;
-+
-+              if (regmap_read(pinctrl->regmap, pinctrl->gpiochip.status[i],
-+                              &status))
-+                      continue;
-+
-+              for_each_set_bit(irq, (unsigned long *)&status,
-+                               AIROHA_PIN_BANK_SIZE) {
-+                      u32 offset = irq + i * AIROHA_PIN_BANK_SIZE;
-+
-+                      generic_handle_irq(irq_find_mapping(girq->domain,
-+                                                          offset));
-+                      regmap_write(pinctrl->regmap,
-+                                   pinctrl->gpiochip.status[i], BIT(irq));
-+              }
-+              handled |= !!status;
-+      }
-+
-+      return handled ? IRQ_HANDLED : IRQ_NONE;
-+}
-+
-+static const struct irq_chip airoha_gpio_irq_chip = {
-+      .name = "airoha-gpio-irq",
-+      .irq_unmask = airoha_irq_unmask,
-+      .irq_mask = airoha_irq_mask,
-+      .irq_mask_ack = airoha_irq_mask,
-+      .irq_set_type = airoha_irq_type,
-+      .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE,
-+};
-+
-+static int airoha_pinctrl_gpio_direction_input(struct gpio_chip *chip,
-+                                             unsigned int gpio)
-+{
-+      return pinctrl_gpio_direction_input(chip->base + gpio);
-+}
-+
-+static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl,
-+                                     struct platform_device *pdev)
-+{
-+      struct airoha_pinctrl_gpiochip *chip = &pinctrl->gpiochip;
-+      struct gpio_chip *gc = &chip->chip;
-+      struct gpio_irq_chip *girq = &gc->irq;
-+      struct device *dev = &pdev->dev;
-+      int irq, err;
-+
-+      chip->data = gpio_data_regs;
-+      chip->dir = gpio_dir_regs;
-+      chip->out = gpio_out_regs;
-+      chip->status = irq_status_regs;
-+      chip->level = irq_level_regs;
-+      chip->edge = irq_edge_regs;
-+
-+      gc->parent = dev;
-+      gc->label = dev_name(dev);
-+      gc->request = gpiochip_generic_request;
-+      gc->free = gpiochip_generic_free;
-+      gc->direction_input = airoha_pinctrl_gpio_direction_input;
-+      gc->direction_output = airoha_gpio_direction_output;
-+      gc->set = airoha_gpio_set;
-+      gc->get = airoha_gpio_get;
-+      gc->base = -1;
-+      gc->ngpio = AIROHA_NUM_PINS;
-+
-+      girq->default_type = IRQ_TYPE_NONE;
-+      girq->handler = handle_simple_irq;
-+      gpio_irq_chip_set_chip(girq, &airoha_gpio_irq_chip);
-+
-+      irq = platform_get_irq(pdev, 0);
-+      if (irq < 0)
-+              return irq;
-+
-+      err = devm_request_irq(dev, irq, airoha_irq_handler, IRQF_SHARED,
-+                             dev_name(dev), pinctrl);
-+      if (err) {
-+              dev_err(dev, "error requesting irq %d: %d\n", irq, err);
-+              return err;
-+      }
-+
-+      return devm_gpiochip_add_data(dev, gc, pinctrl);
-+}
-+
-+/* pinmux callbacks */
-+static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
-+                               unsigned int selector,
-+                               unsigned int group)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      const struct airoha_pinctrl_func *func;
-+      struct function_desc *desc;
-+      struct group_desc *grp;
-+      int i;
-+
-+      desc = pinmux_generic_get_function(pctrl_dev, selector);
-+      if (!desc)
-+              return -EINVAL;
-+
-+      grp = pinctrl_generic_get_group(pctrl_dev, group);
-+      if (!grp)
-+              return -EINVAL;
-+
-+      dev_dbg(pctrl_dev->dev, "enable function %s group %s\n",
-+              desc->name, grp->name);
-+
-+      func = desc->data;
-+      for (i = 0; i < func->group_size; i++) {
-+              const struct airoha_pinctrl_func_group *group;
-+              int j;
-+
-+              group = &func->groups[i];
-+              if (strcmp(group->name, grp->name))
-+                      continue;
-+
-+              for (j = 0; j < group->regmap_size; j++) {
-+                      switch (group->regmap[j].mux) {
-+                      case AIROHA_FUNC_PWM_EXT_MUX:
-+                      case AIROHA_FUNC_PWM_MUX:
-+                              regmap_update_bits(pinctrl->regmap,
-+                                                 group->regmap[j].offset,
-+                                                 group->regmap[j].mask,
-+                                                 group->regmap[j].val);
-+                              break;
-+                      default:
-+                              regmap_update_bits(pinctrl->chip_scu,
-+                                                 group->regmap[j].offset,
-+                                                 group->regmap[j].mask,
-+                                                 group->regmap[j].val);
-+                              break;
-+                      }
-+              }
-+              return 0;
-+      }
-+
-+      return -EINVAL;
-+}
-+
-+static int airoha_pinmux_set_direction(struct pinctrl_dev *pctrl_dev,
-+                                     struct pinctrl_gpio_range *range,
-+                                     unsigned int p, bool input)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      u32 mask, index;
-+      int err, pin;
-+
-+      pin = airoha_convert_pin_to_reg_offset(pctrl_dev, range, p);
-+      if (pin < 0)
-+              return pin;
-+
-+      /* set output enable */
-+      mask = BIT(pin % AIROHA_PIN_BANK_SIZE);
-+      index = pin / AIROHA_PIN_BANK_SIZE;
-+      err = regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.out[index],
-+                               mask, !input ? mask : 0);
-+      if (err)
-+              return err;
-+
-+      /* set direction */
-+      mask = BIT(2 * (pin % AIROHA_REG_GPIOCTRL_NUM_PIN));
-+      index = pin / AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      return regmap_update_bits(pinctrl->regmap,
-+                                pinctrl->gpiochip.dir[index], mask,
-+                                !input ? mask : 0);
-+}
-+
-+static const struct pinmux_ops airoha_pmxops = {
-+      .get_functions_count = pinmux_generic_get_function_count,
-+      .get_function_name = pinmux_generic_get_function_name,
-+      .get_function_groups = pinmux_generic_get_function_groups,
-+      .gpio_set_direction = airoha_pinmux_set_direction,
-+      .set_mux = airoha_pinmux_set_mux,
-+      .strict = true,
-+};
-+
-+/* pinconf callbacks */
-+static const struct airoha_pinctrl_reg *
-+airoha_pinctrl_get_conf_reg(const struct airoha_pinctrl_conf *conf,
-+                          int conf_size, int pin)
-+{
-+      int i;
-+
-+      for (i = 0; i < conf_size; i++) {
-+              if (conf[i].pin == pin)
-+                      return &conf[i].reg;
-+      }
-+
-+      return NULL;
-+}
-+
-+static int airoha_pinctrl_get_conf(struct airoha_pinctrl *pinctrl,
-+                                 const struct airoha_pinctrl_conf *conf,
-+                                 int conf_size, int pin, u32 *val)
-+{
-+      const struct airoha_pinctrl_reg *reg;
-+
-+      reg = airoha_pinctrl_get_conf_reg(conf, conf_size, pin);
-+      if (!reg)
-+              return -EINVAL;
-+
-+      if (regmap_read(pinctrl->chip_scu, reg->offset, val))
-+              return -EINVAL;
-+
-+      *val = (*val & reg->mask) >> __ffs(reg->mask);
-+
-+      return 0;
-+}
-+
-+static int airoha_pinctrl_set_conf(struct airoha_pinctrl *pinctrl,
-+                                 const struct airoha_pinctrl_conf *conf,
-+                                 int conf_size, int pin, u32 val)
-+{
-+      const struct airoha_pinctrl_reg *reg = NULL;
-+
-+      reg = airoha_pinctrl_get_conf_reg(conf, conf_size, pin);
-+      if (!reg)
-+              return -EINVAL;
-+
-+
-+      if (regmap_update_bits(pinctrl->chip_scu, reg->offset, reg->mask,
-+                             val << __ffs(reg->mask)))
-+              return -EINVAL;
-+
-+      return 0;
-+}
-+
-+#define airoha_pinctrl_get_pullup_conf(pinctrl, pin, val)                     \
-+      airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pullup_conf,          \
-+                              ARRAY_SIZE(airoha_pinctrl_pullup_conf),         \
-+                              (pin), (val))
-+#define airoha_pinctrl_get_pulldown_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pulldown_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_pulldown_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_get_drive_e2_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_drive_e2_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_drive_e2_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_get_drive_e4_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_drive_e4_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_drive_e4_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_get_pcie_rst_od_conf(pinctrl, pin, val)                        \
-+      airoha_pinctrl_get_conf((pinctrl), airoha_pinctrl_pcie_rst_od_conf,     \
-+                              ARRAY_SIZE(airoha_pinctrl_pcie_rst_od_conf),    \
-+                              (pin), (val))
-+#define airoha_pinctrl_set_pullup_conf(pinctrl, pin, val)                     \
-+      airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pullup_conf,          \
-+                              ARRAY_SIZE(airoha_pinctrl_pullup_conf),         \
-+                              (pin), (val))
-+#define airoha_pinctrl_set_pulldown_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pulldown_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_pulldown_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_set_drive_e2_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_drive_e2_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_drive_e2_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_set_drive_e4_conf(pinctrl, pin, val)                   \
-+      airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_drive_e4_conf,        \
-+                              ARRAY_SIZE(airoha_pinctrl_drive_e4_conf),       \
-+                              (pin), (val))
-+#define airoha_pinctrl_set_pcie_rst_od_conf(pinctrl, pin, val)                        \
-+      airoha_pinctrl_set_conf((pinctrl), airoha_pinctrl_pcie_rst_od_conf,     \
-+                              ARRAY_SIZE(airoha_pinctrl_pcie_rst_od_conf),    \
-+                              (pin), (val))
-+
-+static int airoha_pinconf_get_direction(struct pinctrl_dev *pctrl_dev, u32 p)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      u32 val, mask;
-+      int err, pin;
-+      u8 index;
-+
-+      pin = airoha_convert_pin_to_reg_offset(pctrl_dev, NULL, p);
-+      if (pin < 0)
-+              return pin;
-+
-+      index = pin / AIROHA_REG_GPIOCTRL_NUM_PIN;
-+      err = regmap_read(pinctrl->regmap, pinctrl->gpiochip.dir[index], &val);
-+      if (err)
-+              return err;
-+
-+      mask = BIT(2 * (pin % AIROHA_REG_GPIOCTRL_NUM_PIN));
-+      return val & mask ? PIN_CONFIG_OUTPUT_ENABLE : PIN_CONFIG_INPUT_ENABLE;
-+}
-+
-+static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev,
-+                            unsigned int pin, unsigned long *config)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      enum pin_config_param param = pinconf_to_config_param(*config);
-+      u32 arg;
-+
-+      switch (param) {
-+      case PIN_CONFIG_BIAS_PULL_DOWN:
-+      case PIN_CONFIG_BIAS_DISABLE:
-+      case PIN_CONFIG_BIAS_PULL_UP: {
-+              u32 pull_up, pull_down;
-+
-+              if (airoha_pinctrl_get_pullup_conf(pinctrl, pin, &pull_up) ||
-+                  airoha_pinctrl_get_pulldown_conf(pinctrl, pin, &pull_down))
-+                      return -EINVAL;
-+
-+              if (param == PIN_CONFIG_BIAS_PULL_UP &&
-+                  !(pull_up && !pull_down))
-+                      return -EINVAL;
-+              else if (param == PIN_CONFIG_BIAS_PULL_DOWN &&
-+                       !(pull_down && !pull_up))
-+                      return -EINVAL;
-+              else if (pull_up || pull_down)
-+                      return -EINVAL;
-+
-+              arg = 1;
-+              break;
-+      }
-+      case PIN_CONFIG_DRIVE_STRENGTH: {
-+              u32 e2, e4;
-+
-+              if (airoha_pinctrl_get_drive_e2_conf(pinctrl, pin, &e2) ||
-+                  airoha_pinctrl_get_drive_e4_conf(pinctrl, pin, &e4))
-+                      return -EINVAL;
-+
-+              arg = e4 << 1 | e2;
-+              break;
-+      }
-+      case PIN_CONFIG_DRIVE_OPEN_DRAIN:
-+              if (airoha_pinctrl_get_pcie_rst_od_conf(pinctrl, pin, &arg))
-+                      return -EINVAL;
-+              break;
-+      case PIN_CONFIG_OUTPUT_ENABLE:
-+      case PIN_CONFIG_INPUT_ENABLE:
-+              arg = airoha_pinconf_get_direction(pctrl_dev, pin);
-+              if (arg != param)
-+                      return -EINVAL;
-+
-+              arg = 1;
-+              break;
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+
-+      *config = pinconf_to_config_packed(param, arg);
-+
-+      return 0;
-+}
-+
-+static int airoha_pinconf_set_pin_value(struct pinctrl_dev *pctrl_dev,
-+                                      unsigned int p, bool value)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      int pin;
-+
-+      pin = airoha_convert_pin_to_reg_offset(pctrl_dev, NULL, p);
-+      if (pin < 0)
-+              return pin;
-+
-+      airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value);
-+
-+      return 0;
-+}
-+
-+static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
-+                            unsigned int pin, unsigned long *configs,
-+                            unsigned int num_configs)
-+{
-+      struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-+      int i;
-+
-+      for (i = 0; i < num_configs; i++) {
-+              u32 param = pinconf_to_config_param(configs[i]);
-+              u32 arg = pinconf_to_config_argument(configs[i]);
-+
-+              switch (param) {
-+              case PIN_CONFIG_BIAS_DISABLE:
-+                      airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 0);
-+                      airoha_pinctrl_set_pullup_conf(pinctrl, pin, 0);
-+                      break;
-+              case PIN_CONFIG_BIAS_PULL_UP:
-+                      airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 0);
-+                      airoha_pinctrl_set_pullup_conf(pinctrl, pin, 1);
-+                      break;
-+              case PIN_CONFIG_BIAS_PULL_DOWN:
-+                      airoha_pinctrl_set_pulldown_conf(pinctrl, pin, 1);
-+                      airoha_pinctrl_set_pullup_conf(pinctrl, pin, 0);
-+                      break;
-+              case PIN_CONFIG_DRIVE_STRENGTH: {
-+                      u32 e2 = 0, e4 = 0;
-+
-+                      switch (arg) {
-+                      case MTK_DRIVE_2mA:
-+                              break;
-+                      case MTK_DRIVE_4mA:
-+                              e2 = 1;
-+                              break;
-+                      case MTK_DRIVE_6mA:
-+                              e4 = 1;
-+                              break;
-+                      case MTK_DRIVE_8mA:
-+                              e2 = 1;
-+                              e4 = 1;
-+                              break;
-+                      default:
-+                              return -EINVAL;
-+                      }
-+
-+                      airoha_pinctrl_set_drive_e2_conf(pinctrl, pin, e2);
-+                      airoha_pinctrl_set_drive_e4_conf(pinctrl, pin, e4);
-+                      break;
-+              }
-+              case PIN_CONFIG_DRIVE_OPEN_DRAIN:
-+                      airoha_pinctrl_set_pcie_rst_od_conf(pinctrl, pin, !!arg);
-+                      break;
-+              case PIN_CONFIG_OUTPUT_ENABLE:
-+              case PIN_CONFIG_INPUT_ENABLE:
-+              case PIN_CONFIG_OUTPUT: {
-+                      bool input = param == PIN_CONFIG_INPUT_ENABLE;
-+                      int err;
-+
-+                      err = airoha_pinmux_set_direction(pctrl_dev, NULL, pin,
-+                                                        input);
-+                      if (err)
-+                              return err;
-+
-+                      if (param == PIN_CONFIG_OUTPUT) {
-+                              err = airoha_pinconf_set_pin_value(pctrl_dev,
-+                                                                 pin, !!arg);
-+                              if (err)
-+                                      return err;
-+                      }
-+                      break;
-+              }
-+              default:
-+                      return -EOPNOTSUPP;
-+              }
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev,
-+                                  unsigned int group, unsigned long *config)
-+{
-+      u32 cur_config = 0;
-+      int i;
-+
-+      for (i = 0; i < airoha_pinctrl_groups[group].npins; i++) {
-+              if (airoha_pinconf_get(pctrl_dev,
-+                                     airoha_pinctrl_groups[group].pins[i],
-+                                     config))
-+                      return -EOPNOTSUPP;
-+
-+              if (i && cur_config != *config)
-+                      return -EOPNOTSUPP;
-+
-+              cur_config = *config;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_pinconf_group_set(struct pinctrl_dev *pctrl_dev,
-+                                  unsigned int group, unsigned long *configs,
-+                                  unsigned int num_configs)
-+{
-+      int i;
-+
-+      for (i = 0; i < airoha_pinctrl_groups[group].npins; i++) {
-+              int err;
-+
-+              err = airoha_pinconf_set(pctrl_dev,
-+                                       airoha_pinctrl_groups[group].pins[i],
-+                                       configs, num_configs);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static const struct pinconf_ops airoha_confops = {
-+      .is_generic = true,
-+      .pin_config_get = airoha_pinconf_get,
-+      .pin_config_set = airoha_pinconf_set,
-+      .pin_config_group_get = airoha_pinconf_group_get,
-+      .pin_config_group_set = airoha_pinconf_group_set,
-+      .pin_config_config_dbg_show = pinconf_generic_dump_config,
-+};
-+
-+static const struct pinctrl_ops airoha_pctlops = {
-+      .get_groups_count = pinctrl_generic_get_group_count,
-+      .get_group_name = pinctrl_generic_get_group_name,
-+      .get_group_pins = pinctrl_generic_get_group_pins,
-+      .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
-+      .dt_free_map = pinconf_generic_dt_free_map,
-+};
-+
-+static struct pinctrl_desc airoha_pinctrl_desc = {
-+      .name = KBUILD_MODNAME,
-+      .owner = THIS_MODULE,
-+      .pctlops = &airoha_pctlops,
-+      .pmxops = &airoha_pmxops,
-+      .confops = &airoha_confops,
-+      .pins = airoha_pinctrl_pins,
-+      .npins = ARRAY_SIZE(airoha_pinctrl_pins),
-+};
-+
-+static int airoha_pinctrl_probe(struct platform_device *pdev)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct airoha_pinctrl *pinctrl;
-+      struct regmap *map;
-+      int err, i;
-+
-+      pinctrl = devm_kzalloc(dev, sizeof(*pinctrl), GFP_KERNEL);
-+      if (!pinctrl)
-+              return -ENOMEM;
-+
-+      pinctrl->regmap = device_node_to_regmap(dev->parent->of_node);
-+      if (IS_ERR(pinctrl->regmap))
-+              return PTR_ERR(pinctrl->regmap);
-+
-+      map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
-+      if (IS_ERR(map))
-+              return PTR_ERR(map);
-+
-+      pinctrl->chip_scu = map;
-+
-+      err = devm_pinctrl_register_and_init(dev, &airoha_pinctrl_desc,
-+                                           pinctrl, &pinctrl->ctrl);
-+      if (err)
-+              return err;
-+
-+      /* build pin groups */
-+      for (i = 0; i < ARRAY_SIZE(airoha_pinctrl_groups); i++) {
-+              const struct pingroup *grp = &airoha_pinctrl_groups[i];
-+
-+              err = pinctrl_generic_add_group(pinctrl->ctrl, grp->name,
-+                                              (int *)grp->pins, grp->npins,
-+                                              (void *)grp);
-+              if (err < 0) {
-+                      dev_err(&pdev->dev, "Failed to register group %s\n",
-+                              grp->name);
-+                      return err;
-+              }
-+      }
-+
-+      /* build functions */
-+      for (i = 0; i < ARRAY_SIZE(airoha_pinctrl_funcs); i++) {
-+              const struct airoha_pinctrl_func *func;
-+
-+              func = &airoha_pinctrl_funcs[i];
-+              err = pinmux_generic_add_function(pinctrl->ctrl,
-+                                                func->desc.name,
-+                                                func->desc.group_names,
-+                                                func->desc.num_group_names,
-+                                                (void *)func);
-+              if (err < 0) {
-+                      dev_err(dev, "Failed to register function %s\n",
-+                              func->desc.name);
-+                      return err;
-+              }
-+      }
-+
-+      err = pinctrl_enable(pinctrl->ctrl);
-+      if (err)
-+              return err;
-+
-+      /* build gpio-chip */
-+      return airoha_pinctrl_add_gpiochip(pinctrl, pdev);
-+}
-+
-+static const struct of_device_id airoha_pinctrl_of_match[] = {
-+      { .compatible = "airoha,en7581-pinctrl" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, airoha_pinctrl_of_match);
-+
-+static struct platform_driver airoha_pinctrl_driver = {
-+      .probe = airoha_pinctrl_probe,
-+      .driver = {
-+              .name = "pinctrl-airoha",
-+              .of_match_table = airoha_pinctrl_of_match,
-+      },
-+};
-+module_platform_driver(airoha_pinctrl_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_AUTHOR("Benjamin Larsson <benjamin.larsson@genexis.eu>");
-+MODULE_AUTHOR("Markus Gothe <markus.gothe@genexis.eu>");
-+MODULE_DESCRIPTION("Pinctrl driver for Airoha SoC");
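
A note on the drive-strength encoding implemented by the pinctrl driver above: the
strength is split across two one-bit fields, E2 and E4, so 2 mA leaves both clear,
4 mA sets E2, 6 mA sets E4 and 8 mA sets both, and the readback code is e4 << 1 | e2.
A stand-alone sketch of that mapping (illustrative only, not part of the driver):

#include <stdio.h>

/* Mirrors the E2/E4 drive-strength encoding used above:
 * 2 mA -> e2=0,e4=0; 4 mA -> e2=1; 6 mA -> e4=1; 8 mA -> e2=1,e4=1.
 */
static int drive_ma_to_bits(int ma, int *e2, int *e4)
{
	switch (ma) {
	case 2: *e2 = 0; *e4 = 0; break;
	case 4: *e2 = 1; *e4 = 0; break;
	case 6: *e2 = 0; *e4 = 1; break;
	case 8: *e2 = 1; *e4 = 1; break;
	default: return -1;
	}
	return 0;
}

int main(void)
{
	int ma, e2, e4;

	for (ma = 2; ma <= 8; ma += 2) {
		drive_ma_to_bits(ma, &e2, &e4);
		/* same readback code as airoha_pinconf_get() computes above */
		printf("%d mA -> e2=%d e4=%d code=%d\n", ma, e2, e4, e4 << 1 | e2);
	}
	return 0;
}
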
diff --git a/target/linux/airoha/patches-6.6/035-v6.13-clk-en7523-Fix-wrong-BUS-clock-for-EN7581.patch b/target/linux/airoha/patches-6.6/035-v6.13-clk-en7523-Fix-wrong-BUS-clock-for-EN7581.patch
deleted file mode 100644 (file)
index 89b970f..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-From 2eb75f86d52565367211c51334d15fe672633085 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Sat, 16 Nov 2024 11:56:53 +0100
-Subject: [PATCH] clk: en7523: Fix wrong BUS clock for EN7581
-
-The Documentation for EN7581 had a typo and still referenced the EN7523
-BUS base source frequency. This was in conflict with a different page in
-the Documentation that states that the BUS runs at 300MHz (600MHz source
-with divisor set to 2) and the actual watchdog that ticks at half the BUS
-clock (150MHz). This was verified with the watchdog by timing the
-seconds that the system takes to reboot (due to the watchdog) and by
-operating on different values of the BUS divisor.
-
-The correct values for source of BUS clock are 600MHz and 540MHz.
-
-This was also confirmed by Airoha.
-
-Cc: stable@vger.kernel.org
-Fixes: 66bc47326ce2 ("clk: en7523: Add EN7581 support")
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20241116105710.19748-1-ansuelsmth@gmail.com
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -87,6 +87,7 @@ static const u32 slic_base[] = { 1000000
- static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
- /* EN7581 */
- static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
-+static const u32 bus7581_base[] = { 600000000, 540000000 };
- static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
- static const u32 crypto_base[] = { 540000000, 480000000 };
-@@ -222,8 +223,8 @@ static const struct en_clk_desc en7581_b
-               .base_reg = REG_BUS_CLK_DIV_SEL,
-               .base_bits = 1,
-               .base_shift = 8,
--              .base_values = bus_base,
--              .n_base_values = ARRAY_SIZE(bus_base),
-+              .base_values = bus7581_base,
-+              .n_base_values = ARRAY_SIZE(bus7581_base),
-               .div_bits = 3,
-               .div_shift = 0,
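
The frequency reasoning in the commit message above can be checked with
back-of-the-envelope arithmetic: a 600 MHz base with the BUS divisor set to 2 gives
a 300 MHz bus clock, and a watchdog ticking at half the bus clock runs at 150 MHz.
A stand-alone sketch of that arithmetic (illustrative only; not driver code):

#include <stdio.h>

/* Corrected EN7581 BUS base frequencies from the patch above, in Hz. */
static const unsigned long bus7581_base[] = { 600000000UL, 540000000UL };

int main(void)
{
	unsigned long base = bus7581_base[0]; /* 600 MHz source */
	unsigned long div = 2;                /* BUS divisor observed on the SoC */
	unsigned long bus = base / div;       /* 300 MHz bus clock */
	unsigned long wdt = bus / 2;          /* watchdog ticks at bus/2: 150 MHz */

	printf("bus %lu Hz, watchdog tick %lu Hz\n", bus, wdt);
	return 0;
}
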
diff --git a/target/linux/airoha/patches-6.6/036-v6.13-net-airoha-Fix-typo-in-REG_CDM2_FWD_CFG-configuratio.patch b/target/linux/airoha/patches-6.6/036-v6.13-net-airoha-Fix-typo-in-REG_CDM2_FWD_CFG-configuratio.patch
deleted file mode 100644 (file)
index a711971..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-From 30d9d8f6a2d7e44a9f91737dd409dbc87ac6f6b7 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 15 Oct 2024 09:58:09 +0200
-Subject: [PATCH] net: airoha: Fix typo in REG_CDM2_FWD_CFG configuration
-
-Fix typo in airoha_fe_init routine configuring CDM2_OAM_QSEL_MASK field
-of REG_CDM2_FWD_CFG register.
-This bug does not introduce any user-visible problem since the Frame Engine
-CDM2 port is used just by the second QDMA block and we currently enable
-just QDMA1 block connected to the MT7530 dsa switch via CDM1 port.
-
-Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet
-support for EN7581 SoC")
-
-Reported-by: ChihWei Cheng <chihwei.cheng@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Message-ID: <20241015-airoha-eth-cdm2-fixes-v1-1-9dc6993286c3@kernel.org>
-Signed-off-by: Andrew Lunn <andrew@lunn.ch>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1369,7 +1369,8 @@ static int airoha_fe_init(struct airoha_
-       airoha_fe_set(eth, REG_GDM_MISC_CFG,
-                     GDM2_RDM_ACK_WAIT_PREF_MASK |
-                     GDM2_CHN_VLD_MODE_MASK);
--      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
-+      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
-+                    FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
-       /* init fragment and assemble Force Port */
-       /* NPU Core-3, NPU Bridge Channel-3 */
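
The one-line fix above matters because CDM2_OAM_QSEL_MASK does not start at bit 0:
writing a raw 15 puts the value in the wrong bit positions, while FIELD_PREP()
shifts it into the field covered by the mask. A self-contained sketch using a
simplified stand-in for the kernel's FIELD_PREP() (the mask below is made up
purely for illustration):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift a value into the
 * bit positions covered by a contiguous mask.
 */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	/* Hypothetical field occupying bits 7:4; the real CDM2_OAM_QSEL_MASK
	 * differs, this only illustrates the shift.
	 */
	uint32_t mask = 0x000000f0;

	printf("raw value 15   -> 0x%08x (lands outside the field)\n", 15u & mask);
	printf("FIELD_PREP(15) -> 0x%08x (value placed inside the field)\n",
	       FIELD_PREP(mask, 15));
	return 0;
}
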
diff --git a/target/linux/airoha/patches-6.6/037-v6.14-net-airoha-Fix-error-path-in-airoha_probe.patch b/target/linux/airoha/patches-6.6/037-v6.14-net-airoha-Fix-error-path-in-airoha_probe.patch
deleted file mode 100644 (file)
index 9499c15..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-From 0c7469ee718e1dd929f52bfb142a7f6fb68f0765 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 16 Dec 2024 18:47:33 +0100
-Subject: [PATCH] net: airoha: Fix error path in airoha_probe()
-
-Do not run napi_disable() if airoha_hw_init() fails since Tx/Rx napi
-has not been started yet. In order to fix the issue, introduce
-airoha_qdma_stop_napi routine and remove napi_disable in
-airoha_hw_cleanup().
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20241216-airoha_probe-error-path-fix-v2-1-6b10e04e9a5c@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 33 ++++++++++++++++------
- 1 file changed, 25 insertions(+), 8 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2139,17 +2139,14 @@ static void airoha_hw_cleanup(struct air
-               if (!qdma->q_rx[i].ndesc)
-                       continue;
--              napi_disable(&qdma->q_rx[i].napi);
-               netif_napi_del(&qdma->q_rx[i].napi);
-               airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
-               if (qdma->q_rx[i].page_pool)
-                       page_pool_destroy(qdma->q_rx[i].page_pool);
-       }
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
--              napi_disable(&qdma->q_tx_irq[i].napi);
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-               netif_napi_del(&qdma->q_tx_irq[i].napi);
--      }
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-               if (!qdma->q_tx[i].ndesc)
-@@ -2174,6 +2171,21 @@ static void airoha_qdma_start_napi(struc
-       }
- }
-+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-+              napi_disable(&qdma->q_tx_irq[i].napi);
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              napi_disable(&qdma->q_rx[i].napi);
-+      }
-+}
-+
- static void airoha_update_hw_stats(struct airoha_gdm_port *port)
- {
-       struct airoha_eth *eth = port->qdma->eth;
-@@ -2731,7 +2743,7 @@ static int airoha_probe(struct platform_
-       err = airoha_hw_init(pdev, eth);
-       if (err)
--              goto error;
-+              goto error_hw_cleanup;
-       for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-               airoha_qdma_start_napi(&eth->qdma[i]);
-@@ -2746,13 +2758,16 @@ static int airoha_probe(struct platform_
-               err = airoha_alloc_gdm_port(eth, np);
-               if (err) {
-                       of_node_put(np);
--                      goto error;
-+                      goto error_napi_stop;
-               }
-       }
-       return 0;
--error:
-+error_napi_stop:
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_qdma_stop_napi(&eth->qdma[i]);
-+error_hw_cleanup:
-       for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-               airoha_hw_cleanup(&eth->qdma[i]);
-@@ -2773,8 +2788,10 @@ static void airoha_remove(struct platfor
-       struct airoha_eth *eth = platform_get_drvdata(pdev);
-       int i;
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-+              airoha_qdma_stop_napi(&eth->qdma[i]);
-               airoha_hw_cleanup(&eth->qdma[i]);
-+      }
-       for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-               struct airoha_gdm_port *port = eth->ports[i];
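
The rule behind the fix above is the usual ordered-unwind idiom for probe(): each
error label undoes only what has already been set up, so a failure in hw init never
disables NAPI contexts that were never started. A minimal, generic illustration of
the idiom (not the driver code):

#include <stdio.h>

static int hw_init(void)        { return 0; }
static void start_napi(void)    { }
static int register_ports(void) { return -1; } /* pretend the last step fails */
static void stop_napi(void)     { puts("stop napi"); }
static void hw_cleanup(void)    { puts("cleanup hw"); }

static int probe(void)
{
	int err;

	err = hw_init();
	if (err)
		goto error_hw_cleanup;	/* NAPI was never started: skip stop_napi() */

	start_napi();

	err = register_ports();
	if (err)
		goto error_napi_stop;	/* NAPI is running: unwind in reverse order */

	return 0;

error_napi_stop:
	stop_napi();
error_hw_cleanup:
	hw_cleanup();
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
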
diff --git a/target/linux/airoha/patches-6.6/038-01-v6.14-net-airoha-Enable-Tx-drop-capability-for-each-Tx-DMA.patch b/target/linux/airoha/patches-6.6/038-01-v6.14-net-airoha-Enable-Tx-drop-capability-for-each-Tx-DMA.patch
deleted file mode 100644 (file)
index c8681aa..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-From 5f795590380476f1c9b7ed0ac945c9b0269dc23a Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 3 Jan 2025 13:17:02 +0100
-Subject: [PATCH 1/4] net: airoha: Enable Tx drop capability for each Tx DMA
- ring
-
-This is a preliminary patch in order to enable hw Qdisc offloading.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -1790,6 +1790,10 @@ static int airoha_qdma_init_tx_queue(str
-               WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
-       }
-+      /* xmit ring drop default setting */
-+      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
-+                      TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
-+
-       airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
-       airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-                       FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
diff --git a/target/linux/airoha/patches-6.6/038-02-v6.14-net-airoha-Introduce-ndo_select_queue-callback.patch b/target/linux/airoha/patches-6.6/038-02-v6.14-net-airoha-Introduce-ndo_select_queue-callback.patch
deleted file mode 100644 (file)
index 75743bd..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-From 2b288b81560b94958cd68bbe54673e55a1730c95 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 3 Jan 2025 13:17:03 +0100
-Subject: [PATCH 2/4] net: airoha: Introduce ndo_select_queue callback
-
-Airoha EN7581 SoC supports 32 Tx DMA rings used to feed packets to QoS
-channels. Each channel supports 8 QoS queues where the user can apply
-QoS scheduling policies. In a similar way, the user can configure hw
-rate shaping for each QoS channel.
-Introduce ndo_select_queue callback in order to select the tx queue
-based on QoS channel and QoS queue. In particular, for dsa device select
-QoS channel according to the dsa user port index, rely on port id
-otherwise. Select QoS queue based on the skb priority.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 30 ++++++++++++++++++++--
- 1 file changed, 28 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -23,6 +23,8 @@
- #define AIROHA_MAX_NUM_XSI_RSTS               5
- #define AIROHA_MAX_MTU                        2000
- #define AIROHA_MAX_PACKET_SIZE                2048
-+#define AIROHA_NUM_QOS_CHANNELS               4
-+#define AIROHA_NUM_QOS_QUEUES         8
- #define AIROHA_NUM_TX_RING            32
- #define AIROHA_NUM_RX_RING            32
- #define AIROHA_FE_MC_MAX_VLAN_TABLE   64
-@@ -2422,21 +2424,44 @@ static void airoha_dev_get_stats64(struc
-       } while (u64_stats_fetch_retry(&port->stats.syncp, start));
- }
-+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
-+                                 struct net_device *sb_dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      int queue, channel;
-+
-+      /* For dsa device select QoS channel according to the dsa user port
-+       * index, rely on port id otherwise. Select QoS queue based on the
-+       * skb priority.
-+       */
-+      channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
-+      channel = channel % AIROHA_NUM_QOS_CHANNELS;
-+      queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
-+      queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
-+
-+      return queue < dev->num_tx_queues ? queue : 0;
-+}
-+
- static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-                                  struct net_device *dev)
- {
-       struct skb_shared_info *sinfo = skb_shinfo(skb);
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      u32 msg0 = 0, msg1, len = skb_headlen(skb);
--      int i, qid = skb_get_queue_mapping(skb);
-+      u32 msg0, msg1, len = skb_headlen(skb);
-       struct airoha_qdma *qdma = port->qdma;
-       u32 nr_frags = 1 + sinfo->nr_frags;
-       struct netdev_queue *txq;
-       struct airoha_queue *q;
-       void *data = skb->data;
-+      int i, qid;
-       u16 index;
-       u8 fport;
-+      qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
-+      msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
-+                        qid / AIROHA_NUM_QOS_QUEUES) |
-+             FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
-+                        qid % AIROHA_NUM_QOS_QUEUES);
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
-                       FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
-@@ -2610,6 +2635,7 @@ static const struct net_device_ops airoh
-       .ndo_init               = airoha_dev_init,
-       .ndo_open               = airoha_dev_open,
-       .ndo_stop               = airoha_dev_stop,
-+      .ndo_select_queue       = airoha_dev_select_queue,
-       .ndo_start_xmit         = airoha_dev_xmit,
-       .ndo_get_stats64        = airoha_dev_get_stats64,
-       .ndo_set_mac_address    = airoha_dev_set_macaddr,
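
The selection logic introduced above boils down to modular arithmetic: a QoS channel
(one of 4) derived from the DSA user port index or the GDM port id, a QoS queue
(one of 8) derived from skb->priority, and a flat Tx queue index of
channel * 8 + queue. A stand-alone sketch of that mapping (illustrative only; the
constants mirror the patch):

#include <stdio.h>

#define NUM_QOS_CHANNELS 4
#define NUM_QOS_QUEUES   8

/* Same arithmetic as airoha_dev_select_queue() above, with the DSA/port-id
 * lookup replaced by a plain "channel source" argument.
 */
static unsigned int select_queue(unsigned int chan_src, unsigned int skb_prio,
				 unsigned int num_tx_queues)
{
	unsigned int channel = chan_src % NUM_QOS_CHANNELS;
	unsigned int queue = (skb_prio - 1) % NUM_QOS_QUEUES;
	unsigned int qid = channel * NUM_QOS_QUEUES + queue;

	return qid < num_tx_queues ? qid : 0;
}

int main(void)
{
	/* e.g. DSA user port 2, skb priority 5 -> channel 2, queue 4 -> qid 20 */
	printf("qid = %u\n", select_queue(2, 5, 32));
	return 0;
}
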
diff --git a/target/linux/airoha/patches-6.6/038-03-v6.14-net-airoha-Add-sched-ETS-offload-support.patch b/target/linux/airoha/patches-6.6/038-03-v6.14-net-airoha-Add-sched-ETS-offload-support.patch
deleted file mode 100644 (file)
index ad5e0e5..0000000
+++ /dev/null
@@ -1,292 +0,0 @@
-From 20bf7d07c956e5c7a22d3076c599cbb7a6054917 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 3 Jan 2025 13:17:04 +0100
-Subject: [PATCH 3/4] net: airoha: Add sched ETS offload support
-
-Introduce support for ETS Qdisc offload available on the Airoha EN7581
-ethernet controller. In order to be effective, ETS Qdisc must be configured
-as a leaf of an HTB Qdisc (HTB Qdisc offload will be added in the following
-patch). ETS Qdisc available on EN7581 ethernet controller supports at
-most 8 concurrent bands (QoS queues). We can enable an ETS Qdisc for
-each available QoS channel.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 196 ++++++++++++++++++++-
- 1 file changed, 195 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -15,6 +15,7 @@
- #include <linux/u64_stats_sync.h>
- #include <net/dsa.h>
- #include <net/page_pool/helpers.h>
-+#include <net/pkt_cls.h>
- #include <uapi/linux/ppp_defs.h>
- #define AIROHA_MAX_NUM_GDM_PORTS      1
-@@ -543,9 +544,24 @@
- #define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
- #define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
-+#define REG_QUEUE_CLOSE_CFG(_n)               (0x00a0 + ((_n) & 0xfc))
-+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)   BIT((_m) + (((_n) & 0x3) << 3))
-+
- #define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
- #define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-+#define REG_CNTR_CFG(_n)              (0x0400 + ((_n) << 3))
-+#define CNTR_EN_MASK                  BIT(31)
-+#define CNTR_ALL_CHAN_EN_MASK         BIT(30)
-+#define CNTR_ALL_QUEUE_EN_MASK                BIT(29)
-+#define CNTR_ALL_DSCP_RING_EN_MASK    BIT(28)
-+#define CNTR_SRC_MASK                 GENMASK(27, 24)
-+#define CNTR_DSCP_RING_MASK           GENMASK(20, 16)
-+#define CNTR_CHAN_MASK                        GENMASK(7, 3)
-+#define CNTR_QUEUE_MASK                       GENMASK(2, 0)
-+
-+#define REG_CNTR_VAL(_n)              (0x0404 + ((_n) << 3))
-+
- #define REG_LMGR_INIT_CFG             0x1000
- #define LMGR_INIT_START                       BIT(31)
- #define LMGR_SRAM_MODE_MASK           BIT(30)
-@@ -571,9 +587,19 @@
- #define TWRR_WEIGHT_SCALE_MASK                BIT(31)
- #define TWRR_WEIGHT_BASE_MASK         BIT(3)
-+#define REG_TXWRR_WEIGHT_CFG          0x1024
-+#define TWRR_RW_CMD_MASK              BIT(31)
-+#define TWRR_RW_CMD_DONE              BIT(30)
-+#define TWRR_CHAN_IDX_MASK            GENMASK(23, 19)
-+#define TWRR_QUEUE_IDX_MASK           GENMASK(18, 16)
-+#define TWRR_VALUE_MASK                       GENMASK(15, 0)
-+
- #define REG_PSE_BUF_USAGE_CFG         0x1028
- #define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
-+#define REG_CHAN_QOS_MODE(_n)         (0x1040 + ((_n) << 2))
-+#define CHAN_QOS_MODE_MASK(_n)                GENMASK(2 + ((_n) << 2), (_n) << 2)
-+
- #define REG_GLB_TRTCM_CFG             0x1080
- #define GLB_TRTCM_EN_MASK             BIT(31)
- #define GLB_TRTCM_MODE_MASK           BIT(30)
-@@ -722,6 +748,17 @@ enum {
-       FE_PSE_PORT_DROP = 0xf,
- };
-+enum tx_sched_mode {
-+      TC_SCH_WRR8,
-+      TC_SCH_SP,
-+      TC_SCH_WRR7,
-+      TC_SCH_WRR6,
-+      TC_SCH_WRR5,
-+      TC_SCH_WRR4,
-+      TC_SCH_WRR3,
-+      TC_SCH_WRR2,
-+};
-+
- struct airoha_queue_entry {
-       union {
-               void *buf;
-@@ -812,6 +849,10 @@ struct airoha_gdm_port {
-       int id;
-       struct airoha_hw_stats stats;
-+
-+      /* qos stats counters */
-+      u64 cpu_tx_packets;
-+      u64 fwd_tx_packets;
- };
- struct airoha_eth {
-@@ -1962,6 +2003,27 @@ static void airoha_qdma_init_qos(struct
-                       FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
- }
-+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
-+              /* Tx-cpu transferred count */
-+              airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
-+              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-+                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-+                             CNTR_ALL_DSCP_RING_EN_MASK |
-+                             FIELD_PREP(CNTR_CHAN_MASK, i));
-+              /* Tx-fwd transferred count */
-+              airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
-+              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-+                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-+                             CNTR_ALL_DSCP_RING_EN_MASK |
-+                             FIELD_PREP(CNTR_SRC_MASK, 1) |
-+                             FIELD_PREP(CNTR_CHAN_MASK, i));
-+      }
-+}
-+
- static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
- {
-       int i;
-@@ -2012,6 +2074,7 @@ static int airoha_qdma_hw_init(struct ai
-       airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
-                       TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
-+      airoha_qdma_init_qos_stats(qdma);
-       return 0;
- }
-@@ -2631,6 +2694,135 @@ airoha_ethtool_get_rmon_stats(struct net
-       } while (u64_stats_fetch_retry(&port->stats.syncp, start));
- }
-+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
-+                                       int channel, enum tx_sched_mode mode,
-+                                       const u16 *weights, u8 n_weights)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_NUM_TX_RING; i++)
-+              airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
-+                                TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
-+
-+      for (i = 0; i < n_weights; i++) {
-+              u32 status;
-+              int err;
-+
-+              airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
-+                             TWRR_RW_CMD_MASK |
-+                             FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
-+                             FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
-+                             FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
-+              err = read_poll_timeout(airoha_qdma_rr, status,
-+                                      status & TWRR_RW_CMD_DONE,
-+                                      USEC_PER_MSEC, 10 * USEC_PER_MSEC,
-+                                      true, port->qdma,
-+                                      REG_TXWRR_WEIGHT_CFG);
-+              if (err)
-+                      return err;
-+      }
-+
-+      airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
-+                      CHAN_QOS_MODE_MASK(channel),
-+                      mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
-+                                       int channel)
-+{
-+      static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-+
-+      return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
-+                                           ARRAY_SIZE(w));
-+}
-+
-+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
-+                                      int channel,
-+                                      struct tc_ets_qopt_offload *opt)
-+{
-+      struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
-+      enum tx_sched_mode mode = TC_SCH_SP;
-+      u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-+      int i, nstrict = 0;
-+
-+      if (p->bands > AIROHA_NUM_QOS_QUEUES)
-+              return -EINVAL;
-+
-+      for (i = 0; i < p->bands; i++) {
-+              if (!p->quanta[i])
-+                      nstrict++;
-+      }
-+
-+      /* this configuration is not supported by the hw */
-+      if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
-+              return -EINVAL;
-+
-+      for (i = 0; i < p->bands - nstrict; i++)
-+              w[i] = p->weights[nstrict + i];
-+
-+      if (!nstrict)
-+              mode = TC_SCH_WRR8;
-+      else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
-+              mode = nstrict + 1;
-+
-+      return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
-+                                           ARRAY_SIZE(w));
-+}
-+
-+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
-+                                      int channel,
-+                                      struct tc_ets_qopt_offload *opt)
-+{
-+      u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
-+                                          REG_CNTR_VAL(channel << 1));
-+      u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
-+                                          REG_CNTR_VAL((channel << 1) + 1));
-+      u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
-+                       (fwd_tx_packets - port->fwd_tx_packets);
-+      _bstats_update(opt->stats.bstats, 0, tx_packets);
-+
-+      port->cpu_tx_packets = cpu_tx_packets;
-+      port->fwd_tx_packets = fwd_tx_packets;
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
-+                                   struct tc_ets_qopt_offload *opt)
-+{
-+      int channel = TC_H_MAJ(opt->handle) >> 16;
-+
-+      if (opt->parent == TC_H_ROOT)
-+              return -EINVAL;
-+
-+      switch (opt->command) {
-+      case TC_ETS_REPLACE:
-+              return airoha_qdma_set_tx_ets_sched(port, channel, opt);
-+      case TC_ETS_DESTROY:
-+              /* PRIO is default qdisc scheduler */
-+              return airoha_qdma_set_tx_prio_sched(port, channel);
-+      case TC_ETS_STATS:
-+              return airoha_qdma_get_tx_ets_stats(port, channel, opt);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
-+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
-+                             void *type_data)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+
-+      switch (type) {
-+      case TC_SETUP_QDISC_ETS:
-+              return airoha_tc_setup_qdisc_ets(port, type_data);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
- static const struct net_device_ops airoha_netdev_ops = {
-       .ndo_init               = airoha_dev_init,
-       .ndo_open               = airoha_dev_open,
-@@ -2639,6 +2831,7 @@ static const struct net_device_ops airoh
-       .ndo_start_xmit         = airoha_dev_xmit,
-       .ndo_get_stats64        = airoha_dev_get_stats64,
-       .ndo_set_mac_address    = airoha_dev_set_macaddr,
-+      .ndo_setup_tc           = airoha_dev_tc_setup,
- };
- static const struct ethtool_ops airoha_ethtool_ops = {
-@@ -2688,7 +2881,8 @@ static int airoha_alloc_gdm_port(struct
-       dev->watchdog_timeo = 5 * HZ;
-       dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                          NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
--                         NETIF_F_SG | NETIF_F_TSO;
-+                         NETIF_F_SG | NETIF_F_TSO |
-+                         NETIF_F_HW_TC;
-       dev->features |= dev->hw_features;
-       dev->dev.of_node = np;
-       dev->irq = qdma->irq;
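
The ETS offload in the hunks above folds the number of strict-priority bands into a single hardware scheduling mode (TC_SCH_SP, TC_SCH_WRR8..TC_SCH_WRR2). The following standalone sketch only restates that mapping for clarity: the enum order and the "nstrict + 1" arithmetic are taken from the patch, while the helper name and the scaffolding are purely illustrative.

  /* Standalone sketch of the scheduler-mode selection done by
   * airoha_qdma_set_tx_ets_sched() above: nstrict strict-priority bands
   * leave (8 - nstrict) WRR bands, matching the enum order
   * TC_SCH_WRR8 = 0, TC_SCH_SP = 1, TC_SCH_WRR7 = 2, ..., TC_SCH_WRR2 = 7.
   */
  #include <assert.h>

  enum tx_sched_mode {
          TC_SCH_WRR8,    /* 0 strict bands, 8 WRR bands */
          TC_SCH_SP,      /* all bands strict priority   */
          TC_SCH_WRR7,    /* 1 strict band,  7 WRR bands */
          TC_SCH_WRR6,    /* 2 strict bands, 6 WRR bands */
          TC_SCH_WRR5,
          TC_SCH_WRR4,
          TC_SCH_WRR3,
          TC_SCH_WRR2,    /* 6 strict bands, 2 WRR bands */
  };

  static enum tx_sched_mode ets_mode(int nstrict)
  {
          if (!nstrict)
                  return TC_SCH_WRR8;       /* pure weighted round-robin */
          if (nstrict >= 7)
                  return TC_SCH_SP;         /* note: nstrict == 7 is rejected by the driver */
          return (enum tx_sched_mode)(nstrict + 1); /* mixed: n strict + (8 - n) WRR */
  }

  int main(void)
  {
          assert(ets_mode(0) == TC_SCH_WRR8);
          assert(ets_mode(2) == TC_SCH_WRR6); /* bands 0-1 strict, six WRR bands */
          assert(ets_mode(8) == TC_SCH_SP);
          return 0;
  }
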
diff --git a/target/linux/airoha/patches-6.6/038-04-v6.14-net-airoha-Add-sched-HTB-offload-support.patch b/target/linux/airoha/patches-6.6/038-04-v6.14-net-airoha-Add-sched-HTB-offload-support.patch
deleted file mode 100644 (file)
index 1239b17..0000000
+++ /dev/null
@@ -1,371 +0,0 @@
-From ef1ca9271313b4ea7b03de69576aacef1e78f381 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 3 Jan 2025 13:17:05 +0100
-Subject: [PATCH 4/4] net: airoha: Add sched HTB offload support
-
-Introduce support for HTB Qdisc offload available in the Airoha EN7581
-ethernet controller. EN7581 can offload only one level of HTB leaves.
-Each HTB leaf represents a QoS channel supported by the EN7581 SoC.
-The typical use-case is creating an HTB leaf per QoS channel to rate
-limit the egress traffic and attaching an ETS Qdisc to each HTB leaf in
-order to enforce traffic prioritization.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 288 ++++++++++++++++++++-
- 1 file changed, 287 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -28,6 +28,8 @@
- #define AIROHA_NUM_QOS_QUEUES         8
- #define AIROHA_NUM_TX_RING            32
- #define AIROHA_NUM_RX_RING            32
-+#define AIROHA_NUM_NETDEV_TX_RINGS    (AIROHA_NUM_TX_RING + \
-+                                       AIROHA_NUM_QOS_CHANNELS)
- #define AIROHA_FE_MC_MAX_VLAN_TABLE   64
- #define AIROHA_FE_MC_MAX_VLAN_PORT    16
- #define AIROHA_NUM_TX_IRQ             2
-@@ -43,6 +45,9 @@
- #define PSE_RSV_PAGES                 128
- #define PSE_QUEUE_RSV_PAGES           64
-+#define QDMA_METER_IDX(_n)            ((_n) & 0xff)
-+#define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+
- /* FE */
- #define PSE_BASE                      0x0100
- #define CSR_IFC_BASE                  0x0200
-@@ -583,6 +588,17 @@
- #define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
- #define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
-+#define TRTCM_PARAM_RW_MASK           BIT(31)
-+#define TRTCM_PARAM_RW_DONE_MASK      BIT(30)
-+#define TRTCM_PARAM_TYPE_MASK         GENMASK(29, 28)
-+#define TRTCM_METER_GROUP_MASK                GENMASK(27, 26)
-+#define TRTCM_PARAM_INDEX_MASK                GENMASK(23, 17)
-+#define TRTCM_PARAM_RATE_TYPE_MASK    BIT(16)
-+
-+#define REG_TRTCM_CFG_PARAM(_n)               ((_n) + 0x4)
-+#define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
-+#define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
-+
- #define REG_TXWRR_MODE_CFG            0x1020
- #define TWRR_WEIGHT_SCALE_MASK                BIT(31)
- #define TWRR_WEIGHT_BASE_MASK         BIT(3)
-@@ -759,6 +775,29 @@ enum tx_sched_mode {
-       TC_SCH_WRR2,
- };
-+enum trtcm_param_type {
-+      TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
-+      TRTCM_TOKEN_RATE_MODE,
-+      TRTCM_BUCKETSIZE_SHIFT_MODE,
-+      TRTCM_BUCKET_COUNTER_MODE,
-+};
-+
-+enum trtcm_mode_type {
-+      TRTCM_COMMIT_MODE,
-+      TRTCM_PEAK_MODE,
-+};
-+
-+enum trtcm_param {
-+      TRTCM_TICK_SEL = BIT(0),
-+      TRTCM_PKT_MODE = BIT(1),
-+      TRTCM_METER_MODE = BIT(2),
-+};
-+
-+#define MIN_TOKEN_SIZE                                4096
-+#define MAX_TOKEN_SIZE_OFFSET                 17
-+#define TRTCM_TOKEN_RATE_MASK                 GENMASK(23, 6)
-+#define TRTCM_TOKEN_RATE_FRACTION_MASK                GENMASK(5, 0)
-+
- struct airoha_queue_entry {
-       union {
-               void *buf;
-@@ -850,6 +889,8 @@ struct airoha_gdm_port {
-       struct airoha_hw_stats stats;
-+      DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-+
-       /* qos stats counters */
-       u64 cpu_tx_packets;
-       u64 fwd_tx_packets;
-@@ -2810,6 +2851,243 @@ static int airoha_tc_setup_qdisc_ets(str
-       }
- }
-+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
-+                                     u32 addr, enum trtcm_param_type param,
-+                                     enum trtcm_mode_type mode,
-+                                     u32 *val_low, u32 *val_high)
-+{
-+      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-+      u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-+                        FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-+                        FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-+                        FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+      if (read_poll_timeout(airoha_qdma_rr, val,
-+                            val & TRTCM_PARAM_RW_DONE_MASK,
-+                            USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-+                            qdma, REG_TRTCM_CFG_PARAM(addr)))
-+              return -ETIMEDOUT;
-+
-+      *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
-+      if (val_high)
-+              *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
-+                                     u32 addr, enum trtcm_param_type param,
-+                                     enum trtcm_mode_type mode, u32 val)
-+{
-+      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-+      u32 config = TRTCM_PARAM_RW_MASK |
-+                   FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-+                   FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-+                   FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-+                   FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+
-+      return read_poll_timeout(airoha_qdma_rr, val,
-+                               val & TRTCM_PARAM_RW_DONE_MASK,
-+                               USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-+                               qdma, REG_TRTCM_CFG_PARAM(addr));
-+}
-+
-+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
-+                                      u32 addr, enum trtcm_mode_type mode,
-+                                      bool enable, u32 enable_mask)
-+{
-+      u32 val;
-+
-+      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                      mode, &val, NULL))
-+              return -EINVAL;
-+
-+      val = enable ? val | enable_mask : val & ~enable_mask;
-+
-+      return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                         mode, val);
-+}
-+
-+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
-+                                            int channel, u32 addr,
-+                                            enum trtcm_mode_type mode,
-+                                            u32 rate_val, u32 bucket_size)
-+{
-+      u32 val, config, tick, unit, rate, rate_frac;
-+      int err;
-+
-+      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                      mode, &config, NULL))
-+              return -EINVAL;
-+
-+      val = airoha_qdma_rr(qdma, addr);
-+      tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
-+      if (config & TRTCM_TICK_SEL)
-+              tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
-+      if (!tick)
-+              return -EINVAL;
-+
-+      unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
-+      if (!unit)
-+              return -EINVAL;
-+
-+      rate = rate_val / unit;
-+      rate_frac = rate_val % unit;
-+      rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
-+      rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
-+             FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
-+
-+      err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
-+                                        TRTCM_TOKEN_RATE_MODE, mode, rate);
-+      if (err)
-+              return err;
-+
-+      val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
-+      val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
-+
-+      return airoha_qdma_set_trtcm_param(qdma, channel, addr,
-+                                         TRTCM_BUCKETSIZE_SHIFT_MODE,
-+                                         mode, val);
-+}
-+
-+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
-+                                       int channel, u32 rate,
-+                                       u32 bucket_size)
-+{
-+      int i, err;
-+
-+      for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
-+              err = airoha_qdma_set_trtcm_config(port->qdma, channel,
-+                                                 REG_EGRESS_TRTCM_CFG, i,
-+                                                 !!rate, TRTCM_METER_MODE);
-+              if (err)
-+                      return err;
-+
-+              err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
-+                                                       REG_EGRESS_TRTCM_CFG,
-+                                                       i, rate, bucket_size);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
-+                                        struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+      u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
-+      struct net_device *dev = port->dev;
-+      int num_tx_queues = dev->real_num_tx_queues;
-+      int err;
-+
-+      if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
-+              return -EINVAL;
-+      }
-+
-+      err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
-+      if (err) {
-+              NL_SET_ERR_MSG_MOD(opt->extack,
-+                                 "failed configuring htb offload");
-+              return err;
-+      }
-+
-+      if (opt->command == TC_HTB_NODE_MODIFY)
-+              return 0;
-+
-+      err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
-+      if (err) {
-+              airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
-+              NL_SET_ERR_MSG_MOD(opt->extack,
-+                                 "failed setting real_num_tx_queues");
-+              return err;
-+      }
-+
-+      set_bit(channel, port->qos_sq_bmap);
-+      opt->qid = AIROHA_NUM_TX_RING + channel;
-+
-+      return 0;
-+}
-+
-+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
-+{
-+      struct net_device *dev = port->dev;
-+
-+      netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
-+      airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
-+      clear_bit(queue, port->qos_sq_bmap);
-+}
-+
-+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
-+                                         struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+
-+      if (!test_bit(channel, port->qos_sq_bmap)) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-+              return -EINVAL;
-+      }
-+
-+      airoha_tc_remove_htb_queue(port, channel);
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
-+{
-+      int q;
-+
-+      for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
-+              airoha_tc_remove_htb_queue(port, q);
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
-+                                          struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+
-+      if (!test_bit(channel, port->qos_sq_bmap)) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-+              return -EINVAL;
-+      }
-+
-+      opt->qid = channel;
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
-+                                   struct tc_htb_qopt_offload *opt)
-+{
-+      switch (opt->command) {
-+      case TC_HTB_CREATE:
-+              break;
-+      case TC_HTB_DESTROY:
-+              return airoha_tc_htb_destroy(port);
-+      case TC_HTB_NODE_MODIFY:
-+      case TC_HTB_LEAF_ALLOC_QUEUE:
-+              return airoha_tc_htb_alloc_leaf_queue(port, opt);
-+      case TC_HTB_LEAF_DEL:
-+      case TC_HTB_LEAF_DEL_LAST:
-+      case TC_HTB_LEAF_DEL_LAST_FORCE:
-+              return airoha_tc_htb_delete_leaf_queue(port, opt);
-+      case TC_HTB_LEAF_QUERY_QUEUE:
-+              return airoha_tc_get_htb_get_leaf_queue(port, opt);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+
-+      return 0;
-+}
-+
- static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
-                              void *type_data)
- {
-@@ -2818,6 +3096,8 @@ static int airoha_dev_tc_setup(struct ne
-       switch (type) {
-       case TC_SETUP_QDISC_ETS:
-               return airoha_tc_setup_qdisc_ets(port, type_data);
-+      case TC_SETUP_QDISC_HTB:
-+              return airoha_tc_setup_qdisc_htb(port, type_data);
-       default:
-               return -EOPNOTSUPP;
-       }
-@@ -2868,7 +3148,8 @@ static int airoha_alloc_gdm_port(struct
-       }
-       dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
--                                    AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
-+                                    AIROHA_NUM_NETDEV_TX_RINGS,
-+                                    AIROHA_NUM_RX_RING);
-       if (!dev) {
-               dev_err(eth->dev, "alloc_etherdev failed\n");
-               return -ENOMEM;
-@@ -2888,6 +3169,11 @@ static int airoha_alloc_gdm_port(struct
-       dev->irq = qdma->irq;
-       SET_NETDEV_DEV(dev, eth->dev);
-+      /* reserve hw queues for HTB offloading */
-+      err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
-+      if (err)
-+              return err;
-+
-       err = of_get_ethdev_address(np, dev);
-       if (err) {
-               if (err == -EPROBE_DEFER)
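
In the HTB patch above, tc hands the driver a rate in bytes per second while the QDMA egress shaper is programmed in kbit/s, and every accepted leaf class is exposed to the stack as one extra netdev TX queue beyond the 32 hardware rings. A hedged sketch of those two conversions follows; the constants mirror the patch, AIROHA_NUM_QOS_CHANNELS = 32 is an assumption (its define is not visible in this hunk), and the helper names are invented for illustration.

  #include <stdint.h>
  #include <stdio.h>

  #define AIROHA_NUM_TX_RING      32
  #define AIROHA_NUM_QOS_CHANNELS 32      /* assumption: not visible in this hunk */

  /* tc reports HTB rates in bytes/s; the shaper is programmed in kbit/s,
   * hence the divide-by-1000 followed by the multiply-by-8 (<< 3).
   */
  static uint32_t htb_rate_to_kbps(uint64_t rate_bytes_per_sec)
  {
          return (uint32_t)(rate_bytes_per_sec / 1000) << 3;
  }

  /* Each offloaded leaf claims one QoS channel and is published as netdev
   * TX queue (AIROHA_NUM_TX_RING + channel), as in opt->qid above.
   */
  static int htb_classid_to_qid(uint32_t classid_minor)
  {
          return AIROHA_NUM_TX_RING + classid_minor % AIROHA_NUM_QOS_CHANNELS;
  }

  int main(void)
  {
          /* 1 Gbit/s = 125,000,000 bytes/s -> 1,000,000 kbit/s */
          printf("%u kbit/s\n", htb_rate_to_kbps(125000000ULL));
          /* leaf class with minor id 3 lands on TX queue 35 */
          printf("qid %d\n", htb_classid_to_qid(3));
          return 0;
  }
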
diff --git a/target/linux/airoha/patches-6.6/039-v6.14-cpufreq-airoha-Add-EN7581-CPUFreq-SMCCC-driver.patch b/target/linux/airoha/patches-6.6/039-v6.14-cpufreq-airoha-Add-EN7581-CPUFreq-SMCCC-driver.patch
deleted file mode 100644 (file)
index 0db82d5..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-From 84cf9e541cccb8cb698518a9897942e8c78f1d83 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 9 Jan 2025 14:12:58 +0100
-Subject: [PATCH] cpufreq: airoha: Add EN7581 CPUFreq SMCCC driver
-
-Add a simple cpufreq driver for the Airoha EN7581 SoC that controls CPU
-frequency scaling with SMC APIs and registers a generic "cpufreq-dt"
-device.
-
-All CPUs share the same frequency and can't be controlled independently.
-CPU frequency is controlled by the attached PM domain.
-
-Add the SoC compatible to the cpufreq-dt-platdev blocklist, as a dedicated
-cpufreq driver is needed with OPP v2 nodes declared in DTS.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
----
- drivers/cpufreq/Kconfig.arm          |   8 ++
- drivers/cpufreq/Makefile             |   1 +
- drivers/cpufreq/airoha-cpufreq.c     | 152 +++++++++++++++++++++++++++
- drivers/cpufreq/cpufreq-dt-platdev.c |   2 +
- 4 files changed, 163 insertions(+)
- create mode 100644 drivers/cpufreq/airoha-cpufreq.c
-
---- a/drivers/cpufreq/Kconfig.arm
-+++ b/drivers/cpufreq/Kconfig.arm
-@@ -41,6 +41,14 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVME
-         To compile this driver as a module, choose M here: the
-         module will be called sun50i-cpufreq-nvmem.
-+config ARM_AIROHA_SOC_CPUFREQ
-+      tristate "Airoha EN7581 SoC CPUFreq support"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      select PM_OPP
-+      default ARCH_AIROHA
-+      help
-+        This adds the CPUFreq driver for Airoha EN7581 SoCs.
-+
- config ARM_APPLE_SOC_CPUFREQ
-       tristate "Apple Silicon SoC CPUFreq support"
-       depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
---- a/drivers/cpufreq/Makefile
-+++ b/drivers/cpufreq/Makefile
-@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY)       +
- ##################################################################################
- # ARM SoC drivers
-+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ)  += airoha-cpufreq.o
- obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ)   += apple-soc-cpufreq.o
- obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
- obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)   += armada-8k-cpufreq.o
---- /dev/null
-+++ b/drivers/cpufreq/airoha-cpufreq.c
-@@ -0,0 +1,166 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/bitfield.h>
-+#include <linux/cpufreq.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/slab.h>
-+
-+#include "cpufreq-dt.h"
-+
-+struct airoha_cpufreq_priv {
-+      int opp_token;
-+      struct device **virt_devs;
-+      struct platform_device *cpufreq_dt;
-+};
-+
-+static struct platform_device *cpufreq_pdev;
-+
-+/* NOP function to disable OPP from setting clock */
-+static int airoha_cpufreq_config_clks_nop(struct device *dev,
-+                                        struct opp_table *opp_table,
-+                                        struct dev_pm_opp *old_opp,
-+                                        struct dev_pm_opp *opp,
-+                                        void *data, bool scaling_down)
-+{
-+      return 0;
-+}
-+
-+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
-+static const char * const airoha_cpufreq_pd_names[] = { "perf", NULL };
-+
-+static int airoha_cpufreq_probe(struct platform_device *pdev)
-+{
-+      struct dev_pm_opp_config config = {
-+              .clk_names = airoha_cpufreq_clk_names,
-+              .config_clks = airoha_cpufreq_config_clks_nop,
-+              .genpd_names = airoha_cpufreq_pd_names,
-+      };
-+      struct platform_device *cpufreq_dt;
-+      struct airoha_cpufreq_priv *priv;
-+      struct device *dev = &pdev->dev;
-+      struct device **virt_devs = NULL;
-+      struct device *cpu_dev;
-+      int ret;
-+
-+      /* CPUs refer to the same OPP table */
-+      cpu_dev = get_cpu_device(0);
-+      if (!cpu_dev)
-+              return -ENODEV;
-+
-+      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-+      if (!priv)
-+              return -ENOMEM;
-+
-+      /* Set OPP table conf with NOP config_clks */
-+      priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
-+      if (priv->opp_token < 0)
-+              return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
-+
-+      /* Set Attached PM for OPP ACTIVE */
-+      if (virt_devs) {
-+              const char * const *name = airoha_cpufreq_pd_names;
-+              int i, j;
-+
-+              for (i = 0; *name; i++, name++) {
-+                      ret = pm_runtime_resume_and_get(virt_devs[i]);
-+                      if (ret) {
-+                              dev_err(cpu_dev, "failed to resume %s: %d\n",
-+                                      *name, ret);
-+
-+                              /* Rollback previous PM runtime calls */
-+                              name = config.genpd_names;
-+                              for (j = 0; *name && j < i; j++, name++)
-+                                      pm_runtime_put(virt_devs[j]);
-+
-+                              goto err_register_cpufreq;
-+                      }
-+              }
-+              priv->virt_devs = virt_devs;
-+      }
-+
-+      cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-+      ret = PTR_ERR_OR_ZERO(cpufreq_dt);
-+      if (ret) {
-+              dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
-+              goto err_register_cpufreq;
-+      }
-+
-+      priv->cpufreq_dt = cpufreq_dt;
-+      platform_set_drvdata(pdev, priv);
-+
-+      return 0;
-+
-+err_register_cpufreq:
-+      dev_pm_opp_clear_config(priv->opp_token);
-+
-+      return ret;
-+}
-+
-+static void airoha_cpufreq_remove(struct platform_device *pdev)
-+{
-+      struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
-+      const char * const *name = airoha_cpufreq_pd_names;
-+      int i;
-+
-+      platform_device_unregister(priv->cpufreq_dt);
-+
-+      dev_pm_opp_clear_config(priv->opp_token);
-+
-+      for (i = 0; *name; i++, name++)
-+              pm_runtime_put(priv->virt_devs[i]);
-+}
-+
-+static struct platform_driver airoha_cpufreq_driver = {
-+      .probe = airoha_cpufreq_probe,
-+      .remove_new = airoha_cpufreq_remove,
-+      .driver = {
-+              .name = "airoha-cpufreq",
-+      },
-+};
-+
-+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
-+      { .compatible = "airoha,en7581" },
-+      {},
-+};
-+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
-+
-+static int __init airoha_cpufreq_init(void)
-+{
-+      struct device_node *np = of_find_node_by_path("/");
-+      const struct of_device_id *match;
-+      int ret;
-+
-+      if (!np)
-+              return -ENODEV;
-+
-+      match = of_match_node(airoha_cpufreq_match_list, np);
-+      of_node_put(np);
-+      if (!match)
-+              return -ENODEV;
-+
-+      ret = platform_driver_register(&airoha_cpufreq_driver);
-+      if (unlikely(ret < 0))
-+              return ret;
-+
-+      cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
-+                                                   -1, match, sizeof(*match));
-+      ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
-+      if (ret)
-+              platform_driver_unregister(&airoha_cpufreq_driver);
-+
-+      return ret;
-+}
-+module_init(airoha_cpufreq_init);
-+
-+static void __exit airoha_cpufreq_exit(void)
-+{
-+      platform_device_unregister(cpufreq_pdev);
-+      platform_driver_unregister(&airoha_cpufreq_driver);
-+}
-+module_exit(airoha_cpufreq_exit);
-+
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
-+MODULE_LICENSE("GPL");
---- a/drivers/cpufreq/cpufreq-dt-platdev.c
-+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
-@@ -103,6 +103,8 @@ static const struct of_device_id allowli
-  * platforms using "operating-points-v2" property.
-  */
- static const struct of_device_id blocklist[] __initconst = {
-+      { .compatible = "airoha,en7581", },
-+
-       { .compatible = "allwinner,sun50i-h6", },
-       { .compatible = "apple,arm-platform", },
diff --git a/target/linux/airoha/patches-6.6/039-v6.14-net-airoha-Enforce-ETS-Qdisc-priomap.patch b/target/linux/airoha/patches-6.6/039-v6.14-net-airoha-Enforce-ETS-Qdisc-priomap.patch
deleted file mode 100644 (file)
index 2753972..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-From b56e4d660a9688ff83f5cbdc6e3ea063352d0d79 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sun, 12 Jan 2025 19:32:45 +0100
-Subject: [PATCH] net: airoha: Enforce ETS Qdisc priomap
-
-EN7581 SoC supports fixed QoS band priority where WRR queues have the
-lowest priority with respect to SP ones.
-E.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
-
-Enforce ETS Qdisc priomap according to the hw capabilities.
-
-Suggested-by: Davide Caratti <dcaratti@redhat.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Davide Caratti <dcaratti@redhat.com>
-Link: https://patch.msgid.link/20250112-airoha_ets_priomap-v1-1-fb616de159ba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 17 +++++++++++++++--
- 1 file changed, 15 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2786,7 +2786,7 @@ static int airoha_qdma_set_tx_ets_sched(
-       struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
-       enum tx_sched_mode mode = TC_SCH_SP;
-       u16 w[AIROHA_NUM_QOS_QUEUES] = {};
--      int i, nstrict = 0;
-+      int i, nstrict = 0, nwrr, qidx;
-       if (p->bands > AIROHA_NUM_QOS_QUEUES)
-               return -EINVAL;
-@@ -2800,7 +2800,20 @@ static int airoha_qdma_set_tx_ets_sched(
-       if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
-               return -EINVAL;
--      for (i = 0; i < p->bands - nstrict; i++)
-+      /* EN7581 SoC supports fixed QoS band priority where WRR queues have
-+       * lowest priorities with respect to SP ones.
-+       * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
-+       */
-+      nwrr = p->bands - nstrict;
-+      qidx = nstrict && nwrr ? nstrict : 0;
-+      for (i = 1; i <= p->bands; i++) {
-+              if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
-+                      return -EINVAL;
-+
-+              qidx = i == nwrr ? 0 : qidx + 1;
-+      }
-+
-+      for (i = 0; i < nwrr; i++)
-               w[i] = p->weights[nstrict + i];
-       if (!nstrict)
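
The priomap check above admits exactly one priority-to-band layout per (bands, nstrict) pair. The sketch below reconstructs that accepted priomap by running the same loop generatively instead of as a check; the loop body is copied from the hunk above, the rest is illustrative scaffolding.

  #include <stdio.h>

  #define AIROHA_NUM_QOS_QUEUES 8

  /* Fill in the only priomap airoha_qdma_set_tx_ets_sched() will accept. */
  static void expected_priomap(int bands, int nstrict,
                               int priomap[AIROHA_NUM_QOS_QUEUES])
  {
          int nwrr = bands - nstrict;
          int qidx = nstrict && nwrr ? nstrict : 0;
          int i;

          for (i = 1; i <= bands; i++) {
                  priomap[i % AIROHA_NUM_QOS_QUEUES] = qidx;
                  qidx = i == nwrr ? 0 : qidx + 1;
          }
  }

  int main(void)
  {
          int map[AIROHA_NUM_QOS_QUEUES] = { 0 };
          int i;

          /* 8 bands with 2 strict ones: prints bands 1 2 3 4 5 6 7 0 for
           * TC priorities 0..7.
           */
          expected_priomap(8, 2, map);
          for (i = 0; i < AIROHA_NUM_QOS_QUEUES; i++)
                  printf("prio %d -> band %d\n", i, map[i]);
          return 0;
  }
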
diff --git a/target/linux/airoha/patches-6.6/040-v6.14-pmdomain-airoha-Add-Airoha-CPU-PM-Domain-support.patch b/target/linux/airoha/patches-6.6/040-v6.14-pmdomain-airoha-Add-Airoha-CPU-PM-Domain-support.patch
deleted file mode 100644 (file)
index 8dc8a3d..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-From 82e703dd438b71432cc0ccbb90925d1e32dd014a Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 9 Jan 2025 14:12:57 +0100
-Subject: [PATCH] pmdomain: airoha: Add Airoha CPU PM Domain support
-
-Add Airoha CPU PM Domain support to control the frequency and power of the
-CPUs present on the Airoha EN7581 SoC.
-
-Frequency and power can be controlled with an SMC command by passing the
-performance state. The driver also exposes a read-only clock that reports
-the current CPU frequency via an SMC command.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20250109131313.32317-1-ansuelsmth@gmail.com
-Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
----
- drivers/pmdomain/mediatek/Kconfig             |  12 ++
- drivers/pmdomain/mediatek/Makefile            |   1 +
- .../pmdomain/mediatek/airoha-cpu-pmdomain.c   | 144 ++++++++++++++++++
- 3 files changed, 157 insertions(+)
- create mode 100644 drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
-
---- a/drivers/soc/mediatek/Kconfig
-+++ b/drivers/soc/mediatek/Kconfig
-@@ -2,6 +2,17 @@
- #
- # MediaTek SoC drivers
- #
-+config AIROHA_CPU_PM_DOMAIN
-+      tristate "Airoha CPU power domain"
-+      default ARCH_AIROHA
-+      depends on PM
-+      select PM_GENERIC_DOMAINS
-+      help
-+        Say y here to enable CPU power domain support for Airoha SoC.
-+
-+        CPU frequency and power is controlled by ATF with SMC command to
-+        set performance states.
-+
- menu "MediaTek SoC drivers"
-       depends on ARCH_MEDIATEK || COMPILE_TEST
---- a/drivers/pmdomain/mediatek/Makefile
-+++ b/drivers/pmdomain/mediatek/Makefile
-@@ -1,3 +1,4 @@
- # SPDX-License-Identifier: GPL-2.0-only
- obj-$(CONFIG_MTK_SCPSYS)              += mtk-scpsys.o
- obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS)   += mtk-pm-domains.o
-+obj-$(CONFIG_AIROHA_CPU_PM_DOMAIN)    += airoha-cpu-pmdomain.o
---- /dev/null
-+++ b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
-@@ -0,0 +1,144 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/arm-smccc.h>
-+#include <linux/bitfield.h>
-+#include <linux/clk-provider.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_domain.h>
-+#include <linux/slab.h>
-+
-+#define AIROHA_SIP_AVS_HANDLE                 0x82000301
-+#define AIROHA_AVS_OP_BASE                    0xddddddd0
-+#define AIROHA_AVS_OP_MASK                    GENMASK(1, 0)
-+#define AIROHA_AVS_OP_FREQ_DYN_ADJ            (AIROHA_AVS_OP_BASE | \
-+                                               FIELD_PREP(AIROHA_AVS_OP_MASK, 0x1))
-+#define AIROHA_AVS_OP_GET_FREQ                        (AIROHA_AVS_OP_BASE | \
-+                                               FIELD_PREP(AIROHA_AVS_OP_MASK, 0x2))
-+
-+struct airoha_cpu_pmdomain_priv {
-+      struct clk_hw hw;
-+      struct generic_pm_domain pd;
-+};
-+
-+static long airoha_cpu_pmdomain_clk_round(struct clk_hw *hw, unsigned long rate,
-+                                        unsigned long *parent_rate)
-+{
-+      return rate;
-+}
-+
-+static unsigned long airoha_cpu_pmdomain_clk_get(struct clk_hw *hw,
-+                                               unsigned long parent_rate)
-+{
-+      struct arm_smccc_res res;
-+
-+      arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_GET_FREQ,
-+                           0, 0, 0, 0, 0, 0, &res);
-+
-+      /* SMCCC returns freq in MHz */
-+      return (int)(res.a0 * 1000 * 1000);
-+}
-+
-+/* Airoha CPU clk SMCC is always enabled */
-+static int airoha_cpu_pmdomain_clk_is_enabled(struct clk_hw *hw)
-+{
-+      return true;
-+}
-+
-+static const struct clk_ops airoha_cpu_pmdomain_clk_ops = {
-+      .recalc_rate = airoha_cpu_pmdomain_clk_get,
-+      .is_enabled = airoha_cpu_pmdomain_clk_is_enabled,
-+      .round_rate = airoha_cpu_pmdomain_clk_round,
-+};
-+
-+static int airoha_cpu_pmdomain_set_performance_state(struct generic_pm_domain *domain,
-+                                                   unsigned int state)
-+{
-+      struct arm_smccc_res res;
-+
-+      arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_FREQ_DYN_ADJ,
-+                           0, state, 0, 0, 0, 0, &res);
-+
-+      /* SMC signal correct apply by unsetting BIT 0 */
-+      return res.a0 & BIT(0) ? -EINVAL : 0;
-+}
-+
-+static int airoha_cpu_pmdomain_probe(struct platform_device *pdev)
-+{
-+      struct airoha_cpu_pmdomain_priv *priv;
-+      struct device *dev = &pdev->dev;
-+      const struct clk_init_data init = {
-+              .name = "cpu",
-+              .ops = &airoha_cpu_pmdomain_clk_ops,
-+              /* Clock with no set_rate, can't cache */
-+              .flags = CLK_GET_RATE_NOCACHE,
-+      };
-+      struct generic_pm_domain *pd;
-+      int ret;
-+
-+      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-+      if (!priv)
-+              return -ENOMEM;
-+
-+      /* Init and register a get-only clk for Cpufreq */
-+      priv->hw.init = &init;
-+      ret = devm_clk_hw_register(dev, &priv->hw);
-+      if (ret)
-+              return ret;
-+
-+      ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
-+                                        &priv->hw);
-+      if (ret)
-+              return ret;
-+
-+      /* Init and register a PD for CPU */
-+      pd = &priv->pd;
-+      pd->name = "cpu_pd";
-+      pd->flags = GENPD_FLAG_ALWAYS_ON;
-+      pd->set_performance_state = airoha_cpu_pmdomain_set_performance_state;
-+
-+      ret = pm_genpd_init(pd, NULL, false);
-+      if (ret)
-+              return ret;
-+
-+      ret = of_genpd_add_provider_simple(dev->of_node, pd);
-+      if (ret)
-+              goto err_add_provider;
-+
-+      platform_set_drvdata(pdev, priv);
-+
-+      return 0;
-+
-+err_add_provider:
-+      pm_genpd_remove(pd);
-+
-+      return ret;
-+}
-+
-+static void airoha_cpu_pmdomain_remove(struct platform_device *pdev)
-+{
-+      struct airoha_cpu_pmdomain_priv *priv = platform_get_drvdata(pdev);
-+
-+      of_genpd_del_provider(pdev->dev.of_node);
-+      pm_genpd_remove(&priv->pd);
-+}
-+
-+static const struct of_device_id airoha_cpu_pmdomain_of_match[] = {
-+      { .compatible = "airoha,en7581-cpufreq" },
-+      { },
-+};
-+MODULE_DEVICE_TABLE(of, airoha_cpu_pmdomain_of_match);
-+
-+static struct platform_driver airoha_cpu_pmdomain_driver = {
-+      .probe = airoha_cpu_pmdomain_probe,
-+      .remove_new = airoha_cpu_pmdomain_remove,
-+      .driver = {
-+              .name = "airoha-cpu-pmdomain",
-+              .of_match_table = airoha_cpu_pmdomain_of_match,
-+      },
-+};
-+module_platform_driver(airoha_cpu_pmdomain_driver);
-+
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("CPU PM domain driver for Airoha SoCs");
-+MODULE_LICENSE("GPL");
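
The two AVS operations in the patch above are the 0xddddddd0 base with a 2-bit opcode OR-ed into bits [1:0], so the first argument of arm_smccc_1_1_invoke() is effectively a fixed constant per operation. A small sketch of that encoding follows; the resulting values are derived only from the defines above, not from firmware documentation.

  #include <assert.h>
  #include <stdint.h>

  #define AIROHA_AVS_OP_BASE 0xddddddd0u

  /* bits [1:0] carry the opcode, mirroring FIELD_PREP(GENMASK(1, 0), op) */
  static uint32_t airoha_avs_op(uint32_t opcode)
  {
          return AIROHA_AVS_OP_BASE | (opcode & 0x3);
  }

  int main(void)
  {
          assert(airoha_avs_op(0x1) == 0xddddddd1u);      /* FREQ_DYN_ADJ */
          assert(airoha_avs_op(0x2) == 0xddddddd2u);      /* GET_FREQ */
          return 0;
  }
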
diff --git a/target/linux/airoha/patches-6.6/041-01-v6.14-clk-en7523-Rework-clock-handling-for-different-clock.patch b/target/linux/airoha/patches-6.6/041-01-v6.14-clk-en7523-Rework-clock-handling-for-different-clock.patch
deleted file mode 100644 (file)
index 96d2bbf..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-From e4a9748e7103c47e575459db2b6a77d14f34da2b Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 14 Jan 2025 00:10:02 +0100
-Subject: [PATCH 1/4] clk: en7523: Rework clock handling for different clock
- numbers
-
-The Airoha EN7581 SoC has additional clocks compared to EN7523, but the
-current driver only supports up to the EN7523 clock count.
-
-To handle this, rework the clock handling to declare the number of
-clocks in match_data and allocate clk_data based on the compatible's
-match_data.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20250113231030.6735-2-ansuelsmth@gmail.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 14 ++++++++------
- 1 file changed, 8 insertions(+), 6 deletions(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -75,6 +75,7 @@ struct en_rst_data {
- };
- struct en_clk_soc_data {
-+      u32 num_clocks;
-       const struct clk_ops pcie_ops;
-       int (*hw_init)(struct platform_device *pdev,
-                      struct clk_hw_onecell_data *clk_data);
-@@ -504,8 +505,6 @@ static void en7523_register_clocks(struc
-       u32 rate;
-       int i;
--      clk_data->num = EN7523_NUM_CLOCKS;
--
-       for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
-               const struct en_clk_desc *desc = &en7523_base_clks[i];
-               u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
-@@ -587,8 +586,6 @@ static void en7581_register_clocks(struc
-       hw = en7523_register_pcie_clk(dev, base);
-       clk_data->hws[EN7523_CLK_PCIE] = hw;
--
--      clk_data->num = EN7523_NUM_CLOCKS;
- }
- static int en7523_reset_update(struct reset_controller_dev *rcdev,
-@@ -702,13 +699,15 @@ static int en7523_clk_probe(struct platf
-       struct clk_hw_onecell_data *clk_data;
-       int r;
-+      soc_data = device_get_match_data(&pdev->dev);
-+
-       clk_data = devm_kzalloc(&pdev->dev,
--                              struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
-+                              struct_size(clk_data, hws, soc_data->num_clocks),
-                               GFP_KERNEL);
-       if (!clk_data)
-               return -ENOMEM;
--      soc_data = device_get_match_data(&pdev->dev);
-+      clk_data->num = soc_data->num_clocks;
-       r = soc_data->hw_init(pdev, clk_data);
-       if (r)
-               return r;
-@@ -717,6 +716,7 @@ static int en7523_clk_probe(struct platf
- }
- static const struct en_clk_soc_data en7523_data = {
-+      .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
-       .pcie_ops = {
-               .is_enabled = en7523_pci_is_enabled,
-               .prepare = en7523_pci_prepare,
-@@ -726,6 +726,8 @@ static const struct en_clk_soc_data en75
- };
- static const struct en_clk_soc_data en7581_data = {
-+      /* We increment num_clocks by 1 to account for additional PCIe clock */
-+      .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
-       .pcie_ops = {
-               .is_enabled = en7581_pci_is_enabled,
-               .enable = en7581_pci_enable,
diff --git a/target/linux/airoha/patches-6.6/041-02-v6.14-dt-bindings-clock-drop-NUM_CLOCKS-define-for-EN7581.patch b/target/linux/airoha/patches-6.6/041-02-v6.14-dt-bindings-clock-drop-NUM_CLOCKS-define-for-EN7581.patch
deleted file mode 100644 (file)
index 5db79a4..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-From 02d3b7557ce28c373ea1e925ae16ab5988284313 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 14 Jan 2025 00:10:03 +0100
-Subject: [PATCH 2/4] dt-bindings: clock: drop NUM_CLOCKS define for EN7581
-
-Drop the NUM_CLOCKS define from the EN7581 include. This is not a binding
-and should not be placed here; the value is derived internally in the
-consuming driver.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Link: https://lore.kernel.org/r/20250113231030.6735-3-ansuelsmth@gmail.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- include/dt-bindings/clock/en7523-clk.h | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/include/dt-bindings/clock/en7523-clk.h
-+++ b/include/dt-bindings/clock/en7523-clk.h
-@@ -12,6 +12,4 @@
- #define EN7523_CLK_CRYPTO     6
- #define EN7523_CLK_PCIE               7
--#define EN7523_NUM_CLOCKS     8
--
- #endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/target/linux/airoha/patches-6.6/041-03-v6.14-dt-bindings-clock-add-ID-for-eMMC-for-EN7581.patch b/target/linux/airoha/patches-6.6/041-03-v6.14-dt-bindings-clock-add-ID-for-eMMC-for-EN7581.patch
deleted file mode 100644 (file)
index a3f0c9e..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-From 82108ad3285f58f314ad41398f44017c7dbe44de Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 14 Jan 2025 00:10:04 +0100
-Subject: [PATCH 3/4] dt-bindings: clock: add ID for eMMC for EN7581
-
-Add ID for eMMC for EN7581. This is to control clock selection of eMMC
-between 200MHz and 150MHz.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Conor Dooley <conor.dooley@microchip.com>
-Link: https://lore.kernel.org/r/20250113231030.6735-4-ansuelsmth@gmail.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- include/dt-bindings/clock/en7523-clk.h | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/include/dt-bindings/clock/en7523-clk.h
-+++ b/include/dt-bindings/clock/en7523-clk.h
-@@ -12,4 +12,6 @@
- #define EN7523_CLK_CRYPTO     6
- #define EN7523_CLK_PCIE               7
-+#define EN7581_CLK_EMMC               8
-+
- #endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/target/linux/airoha/patches-6.6/041-04-v6.14-clk-en7523-Add-clock-for-eMMC-for-EN7581.patch b/target/linux/airoha/patches-6.6/041-04-v6.14-clk-en7523-Add-clock-for-eMMC-for-EN7581.patch
deleted file mode 100644 (file)
index 6c8a330..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-From bfe257f9780d8f77045a7da6ec959ee0659d2f98 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 14 Jan 2025 00:10:05 +0100
-Subject: [PATCH 4/4] clk: en7523: Add clock for eMMC for EN7581
-
-Add a clock for eMMC for EN7581. This is used to report the current eMMC
-source clock and to switch it between 200MHz and 150MHz.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20250113231030.6735-5-ansuelsmth@gmail.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -91,6 +91,7 @@ static const u32 emi7581_base[] = { 5400
- static const u32 bus7581_base[] = { 600000000, 540000000 };
- static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
- static const u32 crypto_base[] = { 540000000, 480000000 };
-+static const u32 emmc7581_base[] = { 200000000, 150000000 };
- static const struct en_clk_desc en7523_base_clks[] = {
-       {
-@@ -281,6 +282,15 @@ static const struct en_clk_desc en7581_b
-               .base_shift = 0,
-               .base_values = crypto_base,
-               .n_base_values = ARRAY_SIZE(crypto_base),
-+      }, {
-+              .id = EN7581_CLK_EMMC,
-+              .name = "emmc",
-+
-+              .base_reg = REG_CRYPTO_CLKSRC2,
-+              .base_bits = 1,
-+              .base_shift = 12,
-+              .base_values = emmc7581_base,
-+              .n_base_values = ARRAY_SIZE(emmc7581_base),
-       }
- };
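
The eMMC descriptor added above selects between the two base rates with a one-bit field at bit 12 of REG_CRYPTO_CLKSRC2. The sketch below decodes it under the assumption that, as for the other en_clk_desc entries, the raw field value indexes base_values[]; that convention is not shown in this hunk, so treat it as an assumption.

  #include <stdint.h>
  #include <stdio.h>

  static const uint32_t emmc7581_base[] = { 200000000, 150000000 };

  /* base_shift = 12, base_bits = 1: bit 12 of REG_CRYPTO_CLKSRC2 picks the
   * eMMC source rate (assumed decoding, see note above).
   */
  static uint32_t emmc_rate(uint32_t reg_crypto_clksrc2)
  {
          uint32_t sel = (reg_crypto_clksrc2 >> 12) & 0x1;

          return emmc7581_base[sel];
  }

  int main(void)
  {
          printf("%u Hz\n", emmc_rate(0));        /* bit 12 clear -> 200 MHz */
          printf("%u Hz\n", emmc_rate(1u << 12)); /* bit 12 set   -> 150 MHz */
          return 0;
  }
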
diff --git a/target/linux/airoha/patches-6.6/042-01-v6.14-PCI-mediatek-gen3-Rely-on-clk_bulk_prepare_enable-in.patch b/target/linux/airoha/patches-6.6/042-01-v6.14-PCI-mediatek-gen3-Rely-on-clk_bulk_prepare_enable-in.patch
deleted file mode 100644 (file)
index 9e353f7..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-From 0e7a622da17da0042294860cdb7a2fac091d25b1 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 8 Jan 2025 10:50:40 +0100
-Subject: [PATCH 1/6] PCI: mediatek-gen3: Rely on clk_bulk_prepare_enable() in
- mtk_pcie_en7581_power_up()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Replace clk_bulk_prepare() and clk_bulk_enable() with
-clk_bulk_prepare_enable() in mtk_pcie_en7581_power_up() routine.
-
-Link: https://lore.kernel.org/r/20250108-pcie-en7581-fixes-v6-1-21ac939a3b9b@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 14 +++-----------
- 1 file changed, 3 insertions(+), 11 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -907,12 +907,6 @@ static int mtk_pcie_en7581_power_up(stru
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
--      err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
--      if (err) {
--              dev_err(dev, "failed to prepare clock\n");
--              goto err_clk_prepare;
--      }
--
-       val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
-             FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
-             FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
-@@ -925,17 +919,15 @@ static int mtk_pcie_en7581_power_up(stru
-             FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
-       writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
--      err = clk_bulk_enable(pcie->num_clks, pcie->clks);
-+      err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
-       if (err) {
-               dev_err(dev, "failed to prepare clock\n");
--              goto err_clk_enable;
-+              goto err_clk_prepare_enable;
-       }
-       return 0;
--err_clk_enable:
--      clk_bulk_unprepare(pcie->num_clks, pcie->clks);
--err_clk_prepare:
-+err_clk_prepare_enable:
-       pm_runtime_put_sync(dev);
-       pm_runtime_disable(dev);
-       reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
diff --git a/target/linux/airoha/patches-6.6/042-02-v6.14-PCI-mediatek-gen3-Move-reset-assert-callbacks-in-.po.patch b/target/linux/airoha/patches-6.6/042-02-v6.14-PCI-mediatek-gen3-Move-reset-assert-callbacks-in-.po.patch
deleted file mode 100644 (file)
index ed5636d..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-From e4c7dfd953f7618f0ccb70d87c1629634f306fab Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 8 Jan 2025 10:50:41 +0100
-Subject: [PATCH 2/6] PCI: mediatek-gen3: Move reset/assert callbacks in
- .power_up()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-In order to make the code more readable, the reset_control_bulk_assert()
-function for PHY reset lines is moved to make it pair with
-reset_control_bulk_deassert() in mtk_pcie_power_up() and
-mtk_pcie_en7581_power_up(). The same change is done for
-reset_control_assert() used to assert MAC reset line.
-
-Introduce PCIE_MTK_RESET_TIME_US macro for the time needed to
-complete PCIe reset on MediaTek controller.
-
-Link: https://lore.kernel.org/r/20250108-pcie-en7581-fixes-v6-2-21ac939a3b9b@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 28 +++++++++++++--------
- 1 file changed, 18 insertions(+), 10 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -120,6 +120,8 @@
- #define MAX_NUM_PHY_RESETS            3
-+#define PCIE_MTK_RESET_TIME_US                10
-+
- /* Time in ms needed to complete PCIe reset on EN7581 SoC */
- #define PCIE_EN7581_RESET_TIME_MS     100
-@@ -875,9 +877,14 @@ static int mtk_pcie_en7581_power_up(stru
-       u32 val;
-       /*
--       * Wait for the time needed to complete the bulk assert in
--       * mtk_pcie_setup for EN7581 SoC.
-+       * The controller may have been left out of reset by the bootloader
-+       * so make sure that we get a clean start by asserting resets here.
-        */
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
-+                                pcie->phy_resets);
-+      reset_control_assert(pcie->mac_reset);
-+
-+      /* Wait for the time needed to complete the reset lines assert. */
-       mdelay(PCIE_EN7581_RESET_TIME_MS);
-       err = phy_init(pcie->phy);
-@@ -944,6 +951,15 @@ static int mtk_pcie_power_up(struct mtk_
-       struct device *dev = pcie->dev;
-       int err;
-+      /*
-+       * The controller may have been left out of reset by the bootloader
-+       * so make sure that we get a clean start by asserting resets here.
-+       */
-+      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
-+                                pcie->phy_resets);
-+      reset_control_assert(pcie->mac_reset);
-+      usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
-+
-       /* PHY power on and enable pipe clock */
-       err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-       if (err) {
-@@ -1016,14 +1032,6 @@ static int mtk_pcie_setup(struct mtk_gen
-        * counter since the bulk is shared.
-        */
-       reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
--      /*
--       * The controller may have been left out of reset by the bootloader
--       * so make sure that we get a clean start by asserting resets here.
--       */
--      reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
--
--      reset_control_assert(pcie->mac_reset);
--      usleep_range(10, 20);
-       /* Don't touch the hardware registers before power up */
-       err = pcie->soc->power_up(pcie);
diff --git a/target/linux/airoha/patches-6.6/042-03-v6.14-PCI-mediatek-gen3-Add-comment-about-initialization-o.patch b/target/linux/airoha/patches-6.6/042-03-v6.14-PCI-mediatek-gen3-Add-comment-about-initialization-o.patch
deleted file mode 100644 (file)
index 785e5bd..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-From 0c9d2d2ef0d916b490a9222ed20ff4616fca876d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 8 Jan 2025 10:50:42 +0100
-Subject: [PATCH 3/6] PCI: mediatek-gen3: Add comment about initialization
- order in mtk_pcie_en7581_power_up()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Add a comment in mtk_pcie_en7581_power_up() to clarify that, unlike the
-other MediaTek Gen3 controllers, the Airoha EN7581 requires PHY
-initialization and power-on before the PHY reset deassert.
-
-Link: https://lore.kernel.org/r/20250108-pcie-en7581-fixes-v6-3-21ac939a3b9b@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -887,6 +887,10 @@ static int mtk_pcie_en7581_power_up(stru
-       /* Wait for the time needed to complete the reset lines assert. */
-       mdelay(PCIE_EN7581_RESET_TIME_MS);
-+      /*
-+       * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
-+       * requires PHY initialization and power-on before PHY reset deassert.
-+       */
-       err = phy_init(pcie->phy);
-       if (err) {
-               dev_err(dev, "failed to initialize PHY\n");
diff --git a/target/linux/airoha/patches-6.6/042-04-v6.14-PCI-mediatek-gen3-Move-reset-delay-in-mtk_pcie_en758.patch b/target/linux/airoha/patches-6.6/042-04-v6.14-PCI-mediatek-gen3-Move-reset-delay-in-mtk_pcie_en758.patch
deleted file mode 100644 (file)
index 1e6d4cd..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-From 90d4e466c9ea2010f33880a36317a8486ccbe082 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 8 Jan 2025 10:50:43 +0100
-Subject: [PATCH 4/6] PCI: mediatek-gen3: Move reset delay in
- mtk_pcie_en7581_power_up()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Airoha EN7581 has a hw bug when asserting/releasing the PCIE_PE_RSTB signal,
-causing occasional PCIe link down issues. In order to overcome the
-problem, the PCIe block is reset using the REG_PCI_CONTROL (0x88) and
-REG_RESET_CONTROL (0x834) registers available in the clock module, by
-running clk_bulk_prepare_enable() in mtk_pcie_en7581_power_up().
-
-In order to make the code more readable, move the wait for the time
-needed to complete the PCIe reset from en7581_pci_enable() to
-mtk_pcie_en7581_power_up().
-
-Reduce reset timeout from 250ms to the standard PCIE_T_PVPERL_MS value
-(100ms) since it has no impact on the driver behavior.
-
-Link: https://lore.kernel.org/r/20250108-pcie-en7581-fixes-v6-4-21ac939a3b9b@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Acked-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-en7523.c                    | 1 -
- drivers/pci/controller/pcie-mediatek-gen3.c | 7 +++++++
- 2 files changed, 7 insertions(+), 1 deletion(-)
-
---- a/drivers/clk/clk-en7523.c
-+++ b/drivers/clk/clk-en7523.c
-@@ -489,7 +489,6 @@ static int en7581_pci_enable(struct clk_
-              REG_PCI_CONTROL_PERSTOUT;
-       val = readl(np_base + REG_PCI_CONTROL);
-       writel(val | mask, np_base + REG_PCI_CONTROL);
--      msleep(250);
-       return 0;
- }
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -936,6 +936,13 @@ static int mtk_pcie_en7581_power_up(stru
-               goto err_clk_prepare_enable;
-       }
-+      /*
-+       * Airoha EN7581 performs PCIe reset via clk callbacks since it has a
-+       * hw issue with PCIE_PE_RSTB signal. Add wait for the time needed to
-+       * complete the PCIe reset.
-+       */
-+      msleep(PCIE_T_PVPERL_MS);
-+
-       return 0;
- err_clk_prepare_enable:
diff --git a/target/linux/airoha/patches-6.6/042-05-v6.14-PCI-mediatek-gen3-Rely-on-msleep-in-mtk_pcie_en7581_.patch b/target/linux/airoha/patches-6.6/042-05-v6.14-PCI-mediatek-gen3-Rely-on-msleep-in-mtk_pcie_en7581_.patch
deleted file mode 100644 (file)
index eb89411..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-From c98bee18d0a094e37100c85effe5e161418f8644 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 8 Jan 2025 10:50:44 +0100
-Subject: [PATCH 5/6] PCI: mediatek-gen3: Rely on msleep() in
- mtk_pcie_en7581_power_up()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Since mtk_pcie_en7581_power_up() runs in non-atomic context, rely on the
-msleep() routine instead of mdelay().
-
-Link: https://lore.kernel.org/r/20250108-pcie-en7581-fixes-v6-5-21ac939a3b9b@kernel.org
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -885,7 +885,7 @@ static int mtk_pcie_en7581_power_up(stru
-       reset_control_assert(pcie->mac_reset);
-       /* Wait for the time needed to complete the reset lines assert. */
--      mdelay(PCIE_EN7581_RESET_TIME_MS);
-+      msleep(PCIE_EN7581_RESET_TIME_MS);
-       /*
-        * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
-@@ -913,7 +913,7 @@ static int mtk_pcie_en7581_power_up(stru
-        * Wait for the time needed to complete the bulk de-assert above.
-        * This time is specific for EN7581 SoC.
-        */
--      mdelay(PCIE_EN7581_RESET_TIME_MS);
-+      msleep(PCIE_EN7581_RESET_TIME_MS);
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
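
The distinction the patch relies on: mdelay() busy-waits for the whole
interval and is intended for atomic contexts, while msleep() puts the
caller to sleep so the CPU can run other work, which is preferable in
process context such as a power_up callback (msleep() may also sleep
slightly longer than requested). A minimal sketch, with illustrative
delay values only:

    #include <linux/delay.h>

    static void wait_where_sleeping_is_forbidden(void)
    {
            mdelay(10);     /* busy-waits ~10 ms; usable under spinlocks/IRQs */
    }

    static void wait_in_process_context(void)
    {
            msleep(10);     /* sleeps at least ~10 ms; other tasks may run */
    }
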
diff --git a/target/linux/airoha/patches-6.6/042-06-v6.14-PCI-mediatek-gen3-Avoid-PCIe-resetting-via-PERST-for.patch b/target/linux/airoha/patches-6.6/042-06-v6.14-PCI-mediatek-gen3-Avoid-PCIe-resetting-via-PERST-for.patch
deleted file mode 100644 (file)
index d18d6f6..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-From 491cb9c5084790aafa02e843349492c284373231 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 9 Jan 2025 00:30:45 +0100
-Subject: [PATCH 6/6] PCI: mediatek-gen3: Avoid PCIe resetting via PERST# for
- Airoha EN7581 SoC
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Airoha EN7581 has a hw bug when asserting/releasing the PERST# signal,
-causing occasional PCIe link-down issues. In order to overcome the
-problem, the PERST# signal is not asserted/released during the device
-probe or suspend/resume phase, and the PCIe block is instead reset using
-en7523_reset_assert() and en7581_pci_enable().
-
-Introduce a flags field in the mtk_gen3_pcie_pdata struct in order to
-specify per-SoC capabilities.
-
-Link: https://lore.kernel.org/r/20250109-pcie-en7581-rst-fix-v4-1-4a45c89fb143@kernel.org
-Tested-by: Hui Ma <hui.ma@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 59 ++++++++++++++-------
- 1 file changed, 41 insertions(+), 18 deletions(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -127,10 +127,18 @@
- struct mtk_gen3_pcie;
-+enum mtk_gen3_pcie_flags {
-+      SKIP_PCIE_RSTB  = BIT(0), /* Skip PERST# assertion during device
-+                                 * probing or suspend/resume phase to
-+                                 * avoid hw bugs/issues.
-+                                 */
-+};
-+
- /**
-  * struct mtk_gen3_pcie_pdata - differentiate between host generations
-  * @power_up: pcie power_up callback
-  * @phy_resets: phy reset lines SoC data.
-+ * @flags: pcie device flags.
-  */
- struct mtk_gen3_pcie_pdata {
-       int (*power_up)(struct mtk_gen3_pcie *pcie);
-@@ -138,6 +146,7 @@ struct mtk_gen3_pcie_pdata {
-               const char *id[MAX_NUM_PHY_RESETS];
-               int num_resets;
-       } phy_resets;
-+      u32 flags;
- };
- /**
-@@ -404,22 +413,33 @@ static int mtk_pcie_startup_port(struct
-       val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
-       writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
--      /* Assert all reset signals */
--      val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
--      val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
--      writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
--
-       /*
--       * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
--       * and 2.2.1 (Initial Power-Up (G3 to S0)).
--       * The deassertion of PERST# should be delayed 100ms (TPVPERL)
--       * for the power and clock to become stable.
-+       * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
-+       * causing occasional PCIe link down. In order to overcome the issue,
-+       * PCIE_RSTB signals are not asserted/released at this stage and the
-+       * PCIe block is reset using en7523_reset_assert() and
-+       * en7581_pci_enable().
-        */
--      msleep(100);
--
--      /* De-assert reset signals */
--      val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
--      writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-+      if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
-+              /* Assert all reset signals */
-+              val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
-+              val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
-+                     PCIE_PE_RSTB;
-+              writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-+
-+              /*
-+               * Described in PCIe CEM specification revision 6.0.
-+               *
-+               * The deassertion of PERST# should be delayed 100ms (TPVPERL)
-+               * for the power and clock to become stable.
-+               */
-+              msleep(PCIE_T_PVPERL_MS);
-+
-+              /* De-assert reset signals */
-+              val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
-+                       PCIE_PE_RSTB);
-+              writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-+      }
-       /* Check if the link is up or not */
-       err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
-@@ -1178,10 +1198,12 @@ static int mtk_pcie_suspend_noirq(struct
-               return err;
-       }
--      /* Pull down the PERST# pin */
--      val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
--      val |= PCIE_PE_RSTB;
--      writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-+      if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
-+              /* Assert the PERST# pin */
-+              val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
-+              val |= PCIE_PE_RSTB;
-+              writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-+      }
-       dev_dbg(pcie->dev, "entered L2 states successfully");
-@@ -1232,6 +1254,7 @@ static const struct mtk_gen3_pcie_pdata
-               .id[2] = "phy-lane2",
-               .num_resets = 3,
-       },
-+      .flags = SKIP_PCIE_RSTB,
- };
- static const struct of_device_id mtk_pcie_of_match[] = {
diff --git a/target/linux/airoha/patches-6.6/043-v6.15-PCI-mediatek-gen3-Remove-leftover-mac_reset-assert-f.patch b/target/linux/airoha/patches-6.6/043-v6.15-PCI-mediatek-gen3-Remove-leftover-mac_reset-assert-f.patch
deleted file mode 100644 (file)
index b4a69eb..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-From b6d7bb0d3bd74b491e2e6fd59c4d5110d06fd63b Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 1 Feb 2025 12:00:18 +0100
-Subject: [PATCH] PCI: mediatek-gen3: Remove leftover mac_reset assert for
- Airoha EN7581 SoC
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Remove a leftover assert of the mac_reset line in mtk_pcie_en7581_power_up().
-
-This is not harmful since EN7581 does not require mac_reset and
-mac_reset is not defined in the EN7581 device tree.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
-Link: https://lore.kernel.org/r/20250201-pcie-en7581-remove-mac_reset-v2-1-a06786cdc683@kernel.org
-[kwilczynski: commit log]
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -902,7 +902,6 @@ static int mtk_pcie_en7581_power_up(stru
-        */
-       reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
-                                 pcie->phy_resets);
--      reset_control_assert(pcie->mac_reset);
-       /* Wait for the time needed to complete the reset lines assert. */
-       msleep(PCIE_EN7581_RESET_TIME_MS);
diff --git a/target/linux/airoha/patches-6.6/044-v6.15-PCI-mediatek-gen3-Configure-PBUS_CSR-registers-for-E.patch b/target/linux/airoha/patches-6.6/044-v6.15-PCI-mediatek-gen3-Configure-PBUS_CSR-registers-for-E.patch
deleted file mode 100644 (file)
index 6470e03..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-From 249b78298078448a699c39356d27d8183af4b281 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 25 Feb 2025 09:04:07 +0100
-Subject: [PATCH] PCI: mediatek-gen3: Configure PBUS_CSR registers for EN7581
- SoC
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Configure the PBus base address and address mask to allow the hw
-to detect whether a given address is accessible on the PCIe controller.
-
-Fixes: f6ab898356dd ("PCI: mediatek-gen3: Add Airoha EN7581 support")
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/r/20250225-en7581-pcie-pbus-csr-v4-2-24324382424a@kernel.org
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
----
- drivers/pci/controller/pcie-mediatek-gen3.c | 28 ++++++++++++++++++++-
- 1 file changed, 27 insertions(+), 1 deletion(-)
-
---- a/drivers/pci/controller/pcie-mediatek-gen3.c
-+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
-@@ -15,6 +15,7 @@
- #include <linux/irqchip/chained_irq.h>
- #include <linux/irqdomain.h>
- #include <linux/kernel.h>
-+#include <linux/mfd/syscon.h>
- #include <linux/module.h>
- #include <linux/msi.h>
- #include <linux/of_device.h>
-@@ -24,6 +25,7 @@
- #include <linux/platform_device.h>
- #include <linux/pm_domain.h>
- #include <linux/pm_runtime.h>
-+#include <linux/regmap.h>
- #include <linux/reset.h>
- #include "../pci.h"
-@@ -892,9 +894,13 @@ static int mtk_pcie_parse_port(struct mt
- static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
- {
-+      struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
-       struct device *dev = pcie->dev;
-+      struct resource_entry *entry;
-+      struct regmap *pbus_regmap;
-+      u32 val, args[2], size;
-+      resource_size_t addr;
-       int err;
--      u32 val;
-       /*
-        * The controller may have been left out of reset by the bootloader
-@@ -907,6 +913,26 @@ static int mtk_pcie_en7581_power_up(stru
-       msleep(PCIE_EN7581_RESET_TIME_MS);
-       /*
-+       * Configure PBus base address and base address mask to allow the
-+       * hw to detect if a given address is accessible on PCIe controller.
-+       */
-+      pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
-+                                                         "mediatek,pbus-csr",
-+                                                         ARRAY_SIZE(args),
-+                                                         args);
-+      if (IS_ERR(pbus_regmap))
-+              return PTR_ERR(pbus_regmap);
-+
-+      entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
-+      if (!entry)
-+              return -ENODEV;
-+
-+      addr = entry->res->start - entry->offset;
-+      regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
-+      size = lower_32_bits(resource_size(entry->res));
-+      regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
-+
-+      /*
-        * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
-        * requires PHY initialization and power-on before PHY reset deassert.
-        */
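
How the two regmap writes above shape the PBus window check, worked
through with hypothetical numbers (a 256 MiB PCIe MEM window starting at
0x20000000; in the driver the real values come from the host bridge
resource list, and base_reg/mask_reg stand in for the args[] offsets read
from the mediatek,pbus-csr phandle):

    static void pbus_csr_example(struct regmap *pbus_regmap,
                                 u32 base_reg, u32 mask_reg)
    {
            resource_size_t addr = 0x20000000;      /* window start (example) */
            u32 size = 0x10000000;                  /* window size (example)  */

            /* base register: low 32 bits of the window start address */
            regmap_write(pbus_regmap, base_reg, lower_32_bits(addr));

            /* mask register: GENMASK(31, __fls(0x10000000)) = GENMASK(31, 28)
             * = 0xf0000000; an address A is then treated as PCIe space when
             * (A & 0xf0000000) == 0x20000000.
             */
            regmap_write(pbus_regmap, mask_reg, GENMASK(31, __fls(size)));
    }
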
diff --git a/target/linux/airoha/patches-6.6/045-v6.14-net-airoha-Fix-wrong-GDM4-register-definition.patch b/target/linux/airoha/patches-6.6/045-v6.14-net-airoha-Fix-wrong-GDM4-register-definition.patch
deleted file mode 100644 (file)
index f17242a..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-From d31a49d37cb132b31cc6683eef2122f8609d6229 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Mon, 20 Jan 2025 16:41:40 +0100
-Subject: [PATCH] net: airoha: Fix wrong GDM4 register definition
-
-Fix the wrong GDM4 register definition. In the Airoha SDK, GDM4 is
-defined at offset 0x2400, but this doesn't make sense as it conflicts
-with the CDM4 block that is in the same location.
-
-Following the pattern where each GDM base is at the FWD_CFG register,
-the GDM4 base offset is currently set to 0x2500. This is correct, but
-REG_GDM4_FWD_CFG and REG_GDM4_SRC_PORT_SET are still using the SDK
-reference with the 0x2400 offset. Fix these two defines by subtracting
-0x100 from each register to reflect the real address location.
-
-Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
-Link: https://patch.msgid.link/20250120154148.13424-1-ansuelsmth@gmail.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -266,11 +266,11 @@
- #define REG_GDM3_FWD_CFG              GDM3_BASE
- #define GDM3_PAD_EN_MASK              BIT(28)
--#define REG_GDM4_FWD_CFG              (GDM4_BASE + 0x100)
-+#define REG_GDM4_FWD_CFG              GDM4_BASE
- #define GDM4_PAD_EN_MASK              BIT(28)
- #define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
--#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x33c)
-+#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c)
- #define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
- #define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
- #define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
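
The address arithmetic behind the fix, spelled out with the values from
the commit message above (the driver places GDM4_BASE at the FWD_CFG
address, 0x2500, while the SDK counts from 0x2400):

    #define GDM4_BASE                     0x2500
    #define REG_GDM4_FWD_CFG              GDM4_BASE           /* 0x2500 = SDK 0x2400 + 0x100 */
    #define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c) /* 0x273c = SDK 0x2400 + 0x33c */
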
diff --git a/target/linux/airoha/patches-6.6/046-v6.15-net-airoha-Fix-TSO-support-for-header-cloned-skbs.patch b/target/linux/airoha/patches-6.6/046-v6.15-net-airoha-Fix-TSO-support-for-header-cloned-skbs.patch
deleted file mode 100644 (file)
index 9c2443c..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-From c6287e1a858e336cc202b484c6138a0fe252c6b3 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 13 Feb 2025 16:34:20 +0100
-Subject: [PATCH] net: airoha: Fix TSO support for header cloned skbs
-
-For GSO packets, skb_cow_head() will reallocate the skb for TSO
-header-cloned skbs in airoha_dev_xmit(). For this reason, the sinfo
-pointer may no longer be valid. Fix the issue by relying on the
-skb_shinfo() macro directly in airoha_dev_xmit().
-
-The problem has existed since
-commit 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-but it is not user visible, since TSO cannot currently be enabled for
-DSA user ports because the net_device vlan_features field is never
-initialized.
-
-Reviewed-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250213-airoha-en7581-flowtable-offload-v4-1-b69ca16d74db@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2549,11 +2549,10 @@ static u16 airoha_dev_select_queue(struc
- static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-                                  struct net_device *dev)
- {
--      struct skb_shared_info *sinfo = skb_shinfo(skb);
-       struct airoha_gdm_port *port = netdev_priv(dev);
-+      u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
-       u32 msg0, msg1, len = skb_headlen(skb);
-       struct airoha_qdma *qdma = port->qdma;
--      u32 nr_frags = 1 + sinfo->nr_frags;
-       struct netdev_queue *txq;
-       struct airoha_queue *q;
-       void *data = skb->data;
-@@ -2576,8 +2575,9 @@ static netdev_tx_t airoha_dev_xmit(struc
-               if (skb_cow_head(skb, 0))
-                       goto error;
--              if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
--                      __be16 csum = cpu_to_be16(sinfo->gso_size);
-+              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
-+                                               SKB_GSO_TCPV6)) {
-+                      __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
-                       tcp_hdr(skb)->check = (__force __sum16)csum;
-                       msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
-@@ -2606,7 +2606,7 @@ static netdev_tx_t airoha_dev_xmit(struc
-       for (i = 0; i < nr_frags; i++) {
-               struct airoha_qdma_desc *desc = &q->desc[index];
-               struct airoha_queue_entry *e = &q->entry[index];
--              skb_frag_t *frag = &sinfo->frags[i];
-+              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               dma_addr_t addr;
-               u32 val;
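
The rule the fix applies, as a generic sketch (not the driver's code):
skb_shinfo() points into skb->head, and skb_cow_head() may reallocate
skb->head, so any shared-info pointer cached before the call must not be
dereferenced afterwards.

    static int tso_prepare(struct sk_buff *skb)
    {
            /* Caching "struct skb_shared_info *sinfo = skb_shinfo(skb);" here
             * and using it below is the bug being fixed: the shared info moves
             * if skb_cow_head() reallocates the header.
             */
            if (skb_cow_head(skb, 0))
                    return -ENOMEM;

            /* Safe: re-evaluate skb_shinfo() after the potential reallocation */
            return skb_shinfo(skb)->gso_size;
    }
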
diff --git a/target/linux/airoha/patches-6.6/047-v6.13-net-airoha-Reset-BQL-stopping-the-netdevice.patch b/target/linux/airoha/patches-6.6/047-v6.13-net-airoha-Reset-BQL-stopping-the-netdevice.patch
deleted file mode 100644 (file)
index f20e203..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-From c9f947769b77c8e8f318bfc8a0777e5d20c44d8d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 17 Oct 2024 16:01:41 +0200
-Subject: [PATCH] net: airoha: Reset BQL stopping the netdevice
-
-Run airoha_qdma_cleanup_tx_queue() in the ndo_stop callback in order to
-unmap pending skbs. Moreover, reset the BQL txq state when stopping the
-netdevice.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Hariprasad Kelam <hkelam@marvell.com>
-Message-ID: <20241017-airoha-en7581-reset-bql-v1-1-08c0c9888de5@kernel.org>
-Signed-off-by: Andrew Lunn <andrew@lunn.ch>
----
- drivers/net/ethernet/mediatek/airoha_eth.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
-@@ -2469,7 +2469,7 @@ static int airoha_dev_stop(struct net_de
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
-       struct airoha_qdma *qdma = port->qdma;
--      int err;
-+      int i, err;
-       netif_tx_disable(dev);
-       err = airoha_set_gdm_ports(qdma->eth, false);
-@@ -2480,6 +2480,14 @@ static int airoha_dev_stop(struct net_de
-                         GLOBAL_CFG_TX_DMA_EN_MASK |
-                         GLOBAL_CFG_RX_DMA_EN_MASK);
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-+              netdev_tx_reset_subqueue(dev, i);
-+      }
-+
-       return 0;
- }
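
Why the BQL reset matters, sketched with the relevant API calls (the
helper functions and names below are illustrative, not the driver's):
BQL accounts bytes handed to the hardware against bytes completed, so
dropping pending descriptors in ndo_stop means their completions never
arrive and the per-queue counter must be cleared, otherwise the queue can
stay throttled after the device is reopened.

    static void example_xmit_accounting(struct net_device *dev,
                                        struct sk_buff *skb, u16 qid)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, qid);

            netdev_tx_sent_queue(txq, skb->len);    /* bytes now in flight */
    }

    static void example_stop_accounting(struct net_device *dev, u16 qid)
    {
            /* Pending descriptors were just dropped, so zero the in-flight
             * byte count instead of waiting for completions that won't come.
             */
            netdev_tx_reset_subqueue(dev, qid);
    }
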
diff --git a/target/linux/airoha/patches-6.6/048-01-v6.15-net-airoha-Move-airoha_eth-driver-in-a-dedicated-fol.patch b/target/linux/airoha/patches-6.6/048-01-v6.15-net-airoha-Move-airoha_eth-driver-in-a-dedicated-fol.patch
deleted file mode 100644 (file)
index 255bbd2..0000000
+++ /dev/null
@@ -1,6825 +0,0 @@
-From fb3dda82fd38ca42140f29b3082324dcdc128293 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:09 +0100
-Subject: [PATCH 01/15] net: airoha: Move airoha_eth driver in a dedicated
- folder
-
-The airoha_eth driver shares no codebase with the mtk_eth_soc one.
-Moreover, the upcoming features (flowtable hw offloading, PCS, ...) will
-not reuse any code from the MediaTek driver. Move the Airoha driver into
-a dedicated folder.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/Kconfig                   |  2 ++
- drivers/net/ethernet/Makefile                  |  1 +
- drivers/net/ethernet/airoha/Kconfig            | 18 ++++++++++++++++++
- drivers/net/ethernet/airoha/Makefile           |  6 ++++++
- .../ethernet/{mediatek => airoha}/airoha_eth.c |  0
- drivers/net/ethernet/mediatek/Kconfig          |  8 --------
- drivers/net/ethernet/mediatek/Makefile         |  1 -
- 7 files changed, 27 insertions(+), 9 deletions(-)
- create mode 100644 drivers/net/ethernet/airoha/Kconfig
- create mode 100644 drivers/net/ethernet/airoha/Makefile
- rename drivers/net/ethernet/{mediatek => airoha}/airoha_eth.c (100%)
-
---- a/drivers/net/ethernet/Kconfig
-+++ b/drivers/net/ethernet/Kconfig
-@@ -23,6 +23,8 @@ source "drivers/net/ethernet/actions/Kco
- source "drivers/net/ethernet/adaptec/Kconfig"
- source "drivers/net/ethernet/aeroflex/Kconfig"
- source "drivers/net/ethernet/agere/Kconfig"
-+source "drivers/net/ethernet/airoha/Kconfig"
-+source "drivers/net/ethernet/mellanox/Kconfig"
- source "drivers/net/ethernet/alacritech/Kconfig"
- source "drivers/net/ethernet/allwinner/Kconfig"
- source "drivers/net/ethernet/alteon/Kconfig"
---- a/drivers/net/ethernet/Makefile
-+++ b/drivers/net/ethernet/Makefile
-@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adap
- obj-$(CONFIG_GRETH) += aeroflex/
- obj-$(CONFIG_NET_VENDOR_ADI) += adi/
- obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
-+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
- obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
- obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
- obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/Kconfig
-@@ -0,0 +1,18 @@
-+# SPDX-License-Identifier: GPL-2.0-only
-+config NET_VENDOR_AIROHA
-+      bool "Airoha devices"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      help
-+        If you have a Airoha SoC with ethernet, say Y.
-+
-+if NET_VENDOR_AIROHA
-+
-+config NET_AIROHA
-+      tristate "Airoha SoC Gigabit Ethernet support"
-+      depends on NET_DSA || !NET_DSA
-+      select PAGE_POOL
-+      help
-+        This driver supports the gigabit ethernet MACs in the
-+        Airoha SoC family.
-+
-+endif #NET_VENDOR_AIROHA
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/Makefile
-@@ -0,0 +1,6 @@
-+# SPDX-License-Identifier: GPL-2.0-only
-+#
-+# Airoha for the Mediatek SoCs built-in ethernet macs
-+#
-+
-+obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
---- a/drivers/net/ethernet/mediatek/Kconfig
-+++ b/drivers/net/ethernet/mediatek/Kconfig
-@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
- if NET_VENDOR_MEDIATEK
--config NET_AIROHA
--      tristate "Airoha SoC Gigabit Ethernet support"
--      depends on NET_DSA || !NET_DSA
--      select PAGE_POOL
--      help
--        This driver supports the gigabit ethernet MACs in the
--        Airoha SoC family.
--
- config NET_MEDIATEK_SOC_WED
-       depends on ARCH_MEDIATEK || COMPILE_TEST
-       def_bool NET_MEDIATEK_SOC != n
---- a/drivers/net/ethernet/mediatek/Makefile
-+++ b/drivers/net/ethernet/mediatek/Makefile
-@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) +
- endif
- obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
- obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
--obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -0,0 +1,3359 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+#include <linux/etherdevice.h>
-+#include <linux/iopoll.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/of.h>
-+#include <linux/of_net.h>
-+#include <linux/platform_device.h>
-+#include <linux/reset.h>
-+#include <linux/tcp.h>
-+#include <linux/u64_stats_sync.h>
-+#include <net/dsa.h>
-+#include <net/page_pool/helpers.h>
-+#include <net/pkt_cls.h>
-+#include <uapi/linux/ppp_defs.h>
-+
-+#define AIROHA_MAX_NUM_GDM_PORTS      1
-+#define AIROHA_MAX_NUM_QDMA           2
-+#define AIROHA_MAX_NUM_RSTS           3
-+#define AIROHA_MAX_NUM_XSI_RSTS               5
-+#define AIROHA_MAX_MTU                        2000
-+#define AIROHA_MAX_PACKET_SIZE                2048
-+#define AIROHA_NUM_QOS_CHANNELS               4
-+#define AIROHA_NUM_QOS_QUEUES         8
-+#define AIROHA_NUM_TX_RING            32
-+#define AIROHA_NUM_RX_RING            32
-+#define AIROHA_NUM_NETDEV_TX_RINGS    (AIROHA_NUM_TX_RING + \
-+                                       AIROHA_NUM_QOS_CHANNELS)
-+#define AIROHA_FE_MC_MAX_VLAN_TABLE   64
-+#define AIROHA_FE_MC_MAX_VLAN_PORT    16
-+#define AIROHA_NUM_TX_IRQ             2
-+#define HW_DSCP_NUM                   2048
-+#define IRQ_QUEUE_LEN(_n)             ((_n) ? 1024 : 2048)
-+#define TX_DSCP_NUM                   1024
-+#define RX_DSCP_NUM(_n)                       \
-+      ((_n) ==  2 ? 128 :             \
-+       (_n) == 11 ? 128 :             \
-+       (_n) == 15 ? 128 :             \
-+       (_n) ==  0 ? 1024 : 16)
-+
-+#define PSE_RSV_PAGES                 128
-+#define PSE_QUEUE_RSV_PAGES           64
-+
-+#define QDMA_METER_IDX(_n)            ((_n) & 0xff)
-+#define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+
-+/* FE */
-+#define PSE_BASE                      0x0100
-+#define CSR_IFC_BASE                  0x0200
-+#define CDM1_BASE                     0x0400
-+#define GDM1_BASE                     0x0500
-+#define PPE1_BASE                     0x0c00
-+
-+#define CDM2_BASE                     0x1400
-+#define GDM2_BASE                     0x1500
-+
-+#define GDM3_BASE                     0x1100
-+#define GDM4_BASE                     0x2500
-+
-+#define GDM_BASE(_n)                  \
-+      ((_n) == 4 ? GDM4_BASE :        \
-+       (_n) == 3 ? GDM3_BASE :        \
-+       (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-+
-+#define REG_FE_DMA_GLO_CFG            0x0000
-+#define FE_DMA_GLO_L2_SPACE_MASK      GENMASK(7, 4)
-+#define FE_DMA_GLO_PG_SZ_MASK         BIT(3)
-+
-+#define REG_FE_RST_GLO_CFG            0x0004
-+#define FE_RST_GDM4_MBI_ARB_MASK      BIT(3)
-+#define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
-+#define FE_RST_CORE_MASK              BIT(0)
-+
-+#define REG_FE_WAN_MAC_H              0x0030
-+#define REG_FE_LAN_MAC_H              0x0040
-+
-+#define REG_FE_MAC_LMIN(_n)           ((_n) + 0x04)
-+#define REG_FE_MAC_LMAX(_n)           ((_n) + 0x08)
-+
-+#define REG_FE_CDM1_OQ_MAP0           0x0050
-+#define REG_FE_CDM1_OQ_MAP1           0x0054
-+#define REG_FE_CDM1_OQ_MAP2           0x0058
-+#define REG_FE_CDM1_OQ_MAP3           0x005c
-+
-+#define REG_FE_PCE_CFG                        0x0070
-+#define PCE_DPI_EN_MASK                       BIT(2)
-+#define PCE_KA_EN_MASK                        BIT(1)
-+#define PCE_MC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_WR               0x0080
-+#define PSE_CFG_PORT_ID_MASK          GENMASK(27, 24)
-+#define PSE_CFG_QUEUE_ID_MASK         GENMASK(20, 16)
-+#define PSE_CFG_WR_EN_MASK            BIT(8)
-+#define PSE_CFG_OQRSV_SEL_MASK                BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_VAL      0x0084
-+#define PSE_CFG_OQ_RSV_MASK           GENMASK(13, 0)
-+
-+#define PSE_FQ_CFG                    0x008c
-+#define PSE_FQ_LIMIT_MASK             GENMASK(14, 0)
-+
-+#define REG_FE_PSE_BUF_SET            0x0090
-+#define PSE_SHARE_USED_LTHD_MASK      GENMASK(31, 16)
-+#define PSE_ALLRSV_MASK                       GENMASK(14, 0)
-+
-+#define REG_PSE_SHARE_USED_THD                0x0094
-+#define PSE_SHARE_USED_MTHD_MASK      GENMASK(31, 16)
-+#define PSE_SHARE_USED_HTHD_MASK      GENMASK(15, 0)
-+
-+#define REG_GDM_MISC_CFG              0x0148
-+#define GDM2_RDM_ACK_WAIT_PREF_MASK   BIT(9)
-+#define GDM2_CHN_VLD_MODE_MASK                BIT(5)
-+
-+#define REG_FE_CSR_IFC_CFG            CSR_IFC_BASE
-+#define FE_IFC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_VIP_PORT_EN            0x01f0
-+#define REG_FE_IFC_PORT_EN            0x01f4
-+
-+#define REG_PSE_IQ_REV1                       (PSE_BASE + 0x08)
-+#define PSE_IQ_RES1_P2_MASK           GENMASK(23, 16)
-+
-+#define REG_PSE_IQ_REV2                       (PSE_BASE + 0x0c)
-+#define PSE_IQ_RES2_P5_MASK           GENMASK(15, 8)
-+#define PSE_IQ_RES2_P4_MASK           GENMASK(7, 0)
-+
-+#define REG_FE_VIP_EN(_n)             (0x0300 + ((_n) << 3))
-+#define PATN_FCPU_EN_MASK             BIT(7)
-+#define PATN_SWP_EN_MASK              BIT(6)
-+#define PATN_DP_EN_MASK                       BIT(5)
-+#define PATN_SP_EN_MASK                       BIT(4)
-+#define PATN_TYPE_MASK                        GENMASK(3, 1)
-+#define PATN_EN_MASK                  BIT(0)
-+
-+#define REG_FE_VIP_PATN(_n)           (0x0304 + ((_n) << 3))
-+#define PATN_DP_MASK                  GENMASK(31, 16)
-+#define PATN_SP_MASK                  GENMASK(15, 0)
-+
-+#define REG_CDM1_VLAN_CTRL            CDM1_BASE
-+#define CDM1_VLAN_MASK                        GENMASK(31, 16)
-+
-+#define REG_CDM1_FWD_CFG              (CDM1_BASE + 0x08)
-+#define CDM1_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM1_CRSN_QSEL(_n)                (CDM1_BASE + 0x10 + ((_n) << 2))
-+#define CDM1_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_CDM2_FWD_CFG              (CDM2_BASE + 0x08)
-+#define CDM2_OAM_QSEL_MASK            GENMASK(31, 27)
-+#define CDM2_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM2_CRSN_QSEL(_n)                (CDM2_BASE + 0x10 + ((_n) << 2))
-+#define CDM2_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_GDM_FWD_CFG(_n)           GDM_BASE(_n)
-+#define GDM_DROP_CRC_ERR              BIT(23)
-+#define GDM_IP4_CKSUM                 BIT(22)
-+#define GDM_TCP_CKSUM                 BIT(21)
-+#define GDM_UDP_CKSUM                 BIT(20)
-+#define GDM_UCFQ_MASK                 GENMASK(15, 12)
-+#define GDM_BCFQ_MASK                 GENMASK(11, 8)
-+#define GDM_MCFQ_MASK                 GENMASK(7, 4)
-+#define GDM_OCFQ_MASK                 GENMASK(3, 0)
-+
-+#define REG_GDM_INGRESS_CFG(_n)               (GDM_BASE(_n) + 0x10)
-+#define GDM_INGRESS_FC_EN_MASK                BIT(1)
-+#define GDM_STAG_EN_MASK              BIT(0)
-+
-+#define REG_GDM_LEN_CFG(_n)           (GDM_BASE(_n) + 0x14)
-+#define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
-+#define GDM_LONG_LEN_MASK             GENMASK(29, 16)
-+
-+#define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
-+#define FE_CPORT_PAD                  BIT(26)
-+#define FE_CPORT_PORT_XFC_MASK                BIT(25)
-+#define FE_CPORT_QUEUE_XFC_MASK               BIT(24)
-+
-+#define REG_FE_GDM_MIB_CLEAR(_n)      (GDM_BASE(_n) + 0xf0)
-+#define FE_GDM_MIB_RX_CLEAR_MASK      BIT(1)
-+#define FE_GDM_MIB_TX_CLEAR_MASK      BIT(0)
-+
-+#define REG_FE_GDM1_MIB_CFG           (GDM1_BASE + 0xf4)
-+#define FE_STRICT_RFC2819_MODE_MASK   BIT(31)
-+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK  BIT(17)
-+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK  BIT(16)
-+#define FE_TX_MIB_ID_MASK             GENMASK(15, 8)
-+#define FE_RX_MIB_ID_MASK             GENMASK(7, 0)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x104)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x10c)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x110)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x114)
-+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x118)
-+#define REG_FE_GDM_TX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x11c)
-+#define REG_FE_GDM_TX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x120)
-+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x124)
-+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x128)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x12c)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x130)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x134)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x138)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x13c)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x140)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x148)
-+#define REG_FE_GDM_RX_FC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x14c)
-+#define REG_FE_GDM_RX_RC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x150)
-+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)   (GDM_BASE(_n) + 0x154)
-+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)      (GDM_BASE(_n) + 0x158)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x15c)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x160)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x164)
-+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x168)
-+#define REG_FE_GDM_RX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x16c)
-+#define REG_FE_GDM_RX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x170)
-+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)     (GDM_BASE(_n) + 0x174)
-+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)                (GDM_BASE(_n) + 0x178)
-+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)      (GDM_BASE(_n) + 0x17c)
-+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x180)
-+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x184)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x188)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x18c)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x190)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x194)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
-+
-+#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
-+#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
-+#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
-+#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
-+#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x28c)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x290)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x294)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x298)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x29c)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2b8)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2bc)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2c0)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2c4)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2c8)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2cc)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2e8)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2ec)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2f0)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2f4)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2f8)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
-+
-+#define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
-+#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25)
-+#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
-+
-+#define REG_GDM3_FWD_CFG              GDM3_BASE
-+#define GDM3_PAD_EN_MASK              BIT(28)
-+
-+#define REG_GDM4_FWD_CFG              GDM4_BASE
-+#define GDM4_PAD_EN_MASK              BIT(28)
-+#define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
-+
-+#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c)
-+#define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
-+#define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
-+#define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
-+
-+#define REG_IP_FRAG_FP                        0x2010
-+#define IP_ASSEMBLE_PORT_MASK         GENMASK(24, 21)
-+#define IP_ASSEMBLE_NBQ_MASK          GENMASK(20, 16)
-+#define IP_FRAGMENT_PORT_MASK         GENMASK(8, 5)
-+#define IP_FRAGMENT_NBQ_MASK          GENMASK(4, 0)
-+
-+#define REG_MC_VLAN_EN                        0x2100
-+#define MC_VLAN_EN_MASK                       BIT(0)
-+
-+#define REG_MC_VLAN_CFG                       0x2104
-+#define MC_VLAN_CFG_CMD_DONE_MASK     BIT(31)
-+#define MC_VLAN_CFG_TABLE_ID_MASK     GENMASK(21, 16)
-+#define MC_VLAN_CFG_PORT_ID_MASK      GENMASK(11, 8)
-+#define MC_VLAN_CFG_TABLE_SEL_MASK    BIT(4)
-+#define MC_VLAN_CFG_RW_MASK           BIT(0)
-+
-+#define REG_MC_VLAN_DATA              0x2108
-+
-+#define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
-+
-+/* QDMA */
-+#define REG_QDMA_GLOBAL_CFG                   0x0004
-+#define GLOBAL_CFG_RX_2B_OFFSET_MASK          BIT(31)
-+#define GLOBAL_CFG_DMA_PREFERENCE_MASK                GENMASK(30, 29)
-+#define GLOBAL_CFG_CPU_TXR_RR_MASK            BIT(28)
-+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK                BIT(27)
-+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK     BIT(26)
-+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK   BIT(25)
-+#define GLOBAL_CFG_OAM_MODIFY_MASK            BIT(24)
-+#define GLOBAL_CFG_RESET_MASK                 BIT(23)
-+#define GLOBAL_CFG_RESET_DONE_MASK            BIT(22)
-+#define GLOBAL_CFG_MULTICAST_EN_MASK          BIT(21)
-+#define GLOBAL_CFG_IRQ1_EN_MASK                       BIT(20)
-+#define GLOBAL_CFG_IRQ0_EN_MASK                       BIT(19)
-+#define GLOBAL_CFG_LOOPCNT_EN_MASK            BIT(18)
-+#define GLOBAL_CFG_RD_BYPASS_WR_MASK          BIT(17)
-+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK         BIT(16)
-+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK          GENMASK(13, 8)
-+#define GLOBAL_CFG_CHECK_DONE_MASK            BIT(7)
-+#define GLOBAL_CFG_TX_WB_DONE_MASK            BIT(6)
-+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK         GENMASK(5, 4)
-+#define GLOBAL_CFG_RX_DMA_BUSY_MASK           BIT(3)
-+#define GLOBAL_CFG_RX_DMA_EN_MASK             BIT(2)
-+#define GLOBAL_CFG_TX_DMA_BUSY_MASK           BIT(1)
-+#define GLOBAL_CFG_TX_DMA_EN_MASK             BIT(0)
-+
-+#define REG_FWD_DSCP_BASE                     0x0010
-+#define REG_FWD_BUF_BASE                      0x0014
-+
-+#define REG_HW_FWD_DSCP_CFG                   0x0018
-+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK         GENMASK(29, 28)
-+#define HW_FWD_DSCP_SCATTER_LEN_MASK          GENMASK(17, 16)
-+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK      GENMASK(15, 0)
-+
-+#define REG_INT_STATUS(_n)            \
-+      (((_n) == 4) ? 0x0730 :         \
-+       ((_n) == 3) ? 0x0724 :         \
-+       ((_n) == 2) ? 0x0720 :         \
-+       ((_n) == 1) ? 0x0024 : 0x0020)
-+
-+#define REG_INT_ENABLE(_n)            \
-+      (((_n) == 4) ? 0x0750 :         \
-+       ((_n) == 3) ? 0x0744 :         \
-+       ((_n) == 2) ? 0x0740 :         \
-+       ((_n) == 1) ? 0x002c : 0x0028)
-+
-+/* QDMA_CSR_INT_ENABLE1 */
-+#define RX15_COHERENT_INT_MASK                BIT(31)
-+#define RX14_COHERENT_INT_MASK                BIT(30)
-+#define RX13_COHERENT_INT_MASK                BIT(29)
-+#define RX12_COHERENT_INT_MASK                BIT(28)
-+#define RX11_COHERENT_INT_MASK                BIT(27)
-+#define RX10_COHERENT_INT_MASK                BIT(26)
-+#define RX9_COHERENT_INT_MASK         BIT(25)
-+#define RX8_COHERENT_INT_MASK         BIT(24)
-+#define RX7_COHERENT_INT_MASK         BIT(23)
-+#define RX6_COHERENT_INT_MASK         BIT(22)
-+#define RX5_COHERENT_INT_MASK         BIT(21)
-+#define RX4_COHERENT_INT_MASK         BIT(20)
-+#define RX3_COHERENT_INT_MASK         BIT(19)
-+#define RX2_COHERENT_INT_MASK         BIT(18)
-+#define RX1_COHERENT_INT_MASK         BIT(17)
-+#define RX0_COHERENT_INT_MASK         BIT(16)
-+#define TX7_COHERENT_INT_MASK         BIT(15)
-+#define TX6_COHERENT_INT_MASK         BIT(14)
-+#define TX5_COHERENT_INT_MASK         BIT(13)
-+#define TX4_COHERENT_INT_MASK         BIT(12)
-+#define TX3_COHERENT_INT_MASK         BIT(11)
-+#define TX2_COHERENT_INT_MASK         BIT(10)
-+#define TX1_COHERENT_INT_MASK         BIT(9)
-+#define TX0_COHERENT_INT_MASK         BIT(8)
-+#define CNT_OVER_FLOW_INT_MASK                BIT(7)
-+#define IRQ1_FULL_INT_MASK            BIT(5)
-+#define IRQ1_INT_MASK                 BIT(4)
-+#define HWFWD_DSCP_LOW_INT_MASK               BIT(3)
-+#define HWFWD_DSCP_EMPTY_INT_MASK     BIT(2)
-+#define IRQ0_FULL_INT_MASK            BIT(1)
-+#define IRQ0_INT_MASK                 BIT(0)
-+
-+#define TX_DONE_INT_MASK(_n)                                  \
-+      ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
-+            : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_TX_MASK                                           \
-+      (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
-+       IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_IDX0_MASK                                         \
-+      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
-+       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
-+       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
-+       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
-+       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
-+       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
-+       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
-+       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
-+       RX15_COHERENT_INT_MASK | INT_TX_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE2 */
-+#define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
-+#define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
-+#define RX13_NO_CPU_DSCP_INT_MASK     BIT(29)
-+#define RX12_NO_CPU_DSCP_INT_MASK     BIT(28)
-+#define RX11_NO_CPU_DSCP_INT_MASK     BIT(27)
-+#define RX10_NO_CPU_DSCP_INT_MASK     BIT(26)
-+#define RX9_NO_CPU_DSCP_INT_MASK      BIT(25)
-+#define RX8_NO_CPU_DSCP_INT_MASK      BIT(24)
-+#define RX7_NO_CPU_DSCP_INT_MASK      BIT(23)
-+#define RX6_NO_CPU_DSCP_INT_MASK      BIT(22)
-+#define RX5_NO_CPU_DSCP_INT_MASK      BIT(21)
-+#define RX4_NO_CPU_DSCP_INT_MASK      BIT(20)
-+#define RX3_NO_CPU_DSCP_INT_MASK      BIT(19)
-+#define RX2_NO_CPU_DSCP_INT_MASK      BIT(18)
-+#define RX1_NO_CPU_DSCP_INT_MASK      BIT(17)
-+#define RX0_NO_CPU_DSCP_INT_MASK      BIT(16)
-+#define RX15_DONE_INT_MASK            BIT(15)
-+#define RX14_DONE_INT_MASK            BIT(14)
-+#define RX13_DONE_INT_MASK            BIT(13)
-+#define RX12_DONE_INT_MASK            BIT(12)
-+#define RX11_DONE_INT_MASK            BIT(11)
-+#define RX10_DONE_INT_MASK            BIT(10)
-+#define RX9_DONE_INT_MASK             BIT(9)
-+#define RX8_DONE_INT_MASK             BIT(8)
-+#define RX7_DONE_INT_MASK             BIT(7)
-+#define RX6_DONE_INT_MASK             BIT(6)
-+#define RX5_DONE_INT_MASK             BIT(5)
-+#define RX4_DONE_INT_MASK             BIT(4)
-+#define RX3_DONE_INT_MASK             BIT(3)
-+#define RX2_DONE_INT_MASK             BIT(2)
-+#define RX1_DONE_INT_MASK             BIT(1)
-+#define RX0_DONE_INT_MASK             BIT(0)
-+
-+#define RX_DONE_INT_MASK                                      \
-+      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
-+       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
-+       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
-+       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
-+       RX15_DONE_INT_MASK)
-+#define INT_IDX1_MASK                                         \
-+      (RX_DONE_INT_MASK |                                     \
-+       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
-+       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
-+       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
-+       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
-+       RX15_NO_CPU_DSCP_INT_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE5 */
-+#define TX31_COHERENT_INT_MASK                BIT(31)
-+#define TX30_COHERENT_INT_MASK                BIT(30)
-+#define TX29_COHERENT_INT_MASK                BIT(29)
-+#define TX28_COHERENT_INT_MASK                BIT(28)
-+#define TX27_COHERENT_INT_MASK                BIT(27)
-+#define TX26_COHERENT_INT_MASK                BIT(26)
-+#define TX25_COHERENT_INT_MASK                BIT(25)
-+#define TX24_COHERENT_INT_MASK                BIT(24)
-+#define TX23_COHERENT_INT_MASK                BIT(23)
-+#define TX22_COHERENT_INT_MASK                BIT(22)
-+#define TX21_COHERENT_INT_MASK                BIT(21)
-+#define TX20_COHERENT_INT_MASK                BIT(20)
-+#define TX19_COHERENT_INT_MASK                BIT(19)
-+#define TX18_COHERENT_INT_MASK                BIT(18)
-+#define TX17_COHERENT_INT_MASK                BIT(17)
-+#define TX16_COHERENT_INT_MASK                BIT(16)
-+#define TX15_COHERENT_INT_MASK                BIT(15)
-+#define TX14_COHERENT_INT_MASK                BIT(14)
-+#define TX13_COHERENT_INT_MASK                BIT(13)
-+#define TX12_COHERENT_INT_MASK                BIT(12)
-+#define TX11_COHERENT_INT_MASK                BIT(11)
-+#define TX10_COHERENT_INT_MASK                BIT(10)
-+#define TX9_COHERENT_INT_MASK         BIT(9)
-+#define TX8_COHERENT_INT_MASK         BIT(8)
-+
-+#define INT_IDX4_MASK                                         \
-+      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
-+       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
-+       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
-+       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
-+       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
-+       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
-+       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
-+       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
-+       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
-+       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
-+       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
-+       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-+
-+#define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
-+
-+#define REG_TX_IRQ_CFG(_n)            ((_n) ? 0x004c : 0x0054)
-+#define TX_IRQ_THR_MASK                       GENMASK(27, 16)
-+#define TX_IRQ_DEPTH_MASK             GENMASK(11, 0)
-+
-+#define REG_IRQ_CLEAR_LEN(_n)         ((_n) ? 0x0064 : 0x0058)
-+#define IRQ_CLEAR_LEN_MASK            GENMASK(7, 0)
-+
-+#define REG_IRQ_STATUS(_n)            ((_n) ? 0x0068 : 0x005c)
-+#define IRQ_ENTRY_LEN_MASK            GENMASK(27, 16)
-+#define IRQ_HEAD_IDX_MASK             GENMASK(11, 0)
-+
-+#define REG_TX_RING_BASE(_n)  \
-+      (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-+
-+#define REG_TX_RING_BLOCKING(_n)      \
-+      (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-+
-+#define TX_RING_IRQ_BLOCKING_MAP_MASK                 BIT(6)
-+#define TX_RING_IRQ_BLOCKING_CFG_MASK                 BIT(4)
-+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK          BIT(2)
-+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK    BIT(1)
-+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK    BIT(0)
-+
-+#define REG_TX_CPU_IDX(_n)    \
-+      (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-+
-+#define TX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_TX_DMA_IDX(_n)    \
-+      (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-+
-+#define TX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define IRQ_RING_IDX_MASK             GENMASK(20, 16)
-+#define IRQ_DESC_IDX_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_RING_BASE(_n)  \
-+      (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-+
-+#define REG_RX_RING_SIZE(_n)  \
-+      (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-+
-+#define RX_RING_THR_MASK              GENMASK(31, 16)
-+#define RX_RING_SIZE_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_CPU_IDX(_n)    \
-+      (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-+
-+#define RX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_RX_DMA_IDX(_n)    \
-+      (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-+
-+#define REG_RX_DELAY_INT_IDX(_n)      \
-+      (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-+
-+#define RX_DELAY_INT_MASK             GENMASK(15, 0)
-+
-+#define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_INGRESS_TRTCM_CFG         0x0070
-+#define INGRESS_TRTCM_EN_MASK         BIT(31)
-+#define INGRESS_TRTCM_MODE_MASK               BIT(30)
-+#define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
-+#define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
-+
-+#define REG_QUEUE_CLOSE_CFG(_n)               (0x00a0 + ((_n) & 0xfc))
-+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)   BIT((_m) + (((_n) & 0x3) << 3))
-+
-+#define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
-+#define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-+
-+#define REG_CNTR_CFG(_n)              (0x0400 + ((_n) << 3))
-+#define CNTR_EN_MASK                  BIT(31)
-+#define CNTR_ALL_CHAN_EN_MASK         BIT(30)
-+#define CNTR_ALL_QUEUE_EN_MASK                BIT(29)
-+#define CNTR_ALL_DSCP_RING_EN_MASK    BIT(28)
-+#define CNTR_SRC_MASK                 GENMASK(27, 24)
-+#define CNTR_DSCP_RING_MASK           GENMASK(20, 16)
-+#define CNTR_CHAN_MASK                        GENMASK(7, 3)
-+#define CNTR_QUEUE_MASK                       GENMASK(2, 0)
-+
-+#define REG_CNTR_VAL(_n)              (0x0404 + ((_n) << 3))
-+
-+#define REG_LMGR_INIT_CFG             0x1000
-+#define LMGR_INIT_START                       BIT(31)
-+#define LMGR_SRAM_MODE_MASK           BIT(30)
-+#define HW_FWD_PKTSIZE_OVERHEAD_MASK  GENMASK(27, 20)
-+#define HW_FWD_DESC_NUM_MASK          GENMASK(16, 0)
-+
-+#define REG_FWD_DSCP_LOW_THR          0x1004
-+#define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
-+
-+#define REG_EGRESS_RATE_METER_CFG             0x100c
-+#define EGRESS_RATE_METER_EN_MASK             BIT(31)
-+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
-+#define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
-+#define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
-+
-+#define REG_EGRESS_TRTCM_CFG          0x1010
-+#define EGRESS_TRTCM_EN_MASK          BIT(31)
-+#define EGRESS_TRTCM_MODE_MASK                BIT(30)
-+#define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
-+#define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
-+
-+#define TRTCM_PARAM_RW_MASK           BIT(31)
-+#define TRTCM_PARAM_RW_DONE_MASK      BIT(30)
-+#define TRTCM_PARAM_TYPE_MASK         GENMASK(29, 28)
-+#define TRTCM_METER_GROUP_MASK                GENMASK(27, 26)
-+#define TRTCM_PARAM_INDEX_MASK                GENMASK(23, 17)
-+#define TRTCM_PARAM_RATE_TYPE_MASK    BIT(16)
-+
-+#define REG_TRTCM_CFG_PARAM(_n)               ((_n) + 0x4)
-+#define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
-+#define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
-+
-+#define REG_TXWRR_MODE_CFG            0x1020
-+#define TWRR_WEIGHT_SCALE_MASK                BIT(31)
-+#define TWRR_WEIGHT_BASE_MASK         BIT(3)
-+
-+#define REG_TXWRR_WEIGHT_CFG          0x1024
-+#define TWRR_RW_CMD_MASK              BIT(31)
-+#define TWRR_RW_CMD_DONE              BIT(30)
-+#define TWRR_CHAN_IDX_MASK            GENMASK(23, 19)
-+#define TWRR_QUEUE_IDX_MASK           GENMASK(18, 16)
-+#define TWRR_VALUE_MASK                       GENMASK(15, 0)
-+
-+#define REG_PSE_BUF_USAGE_CFG         0x1028
-+#define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
-+
-+#define REG_CHAN_QOS_MODE(_n)         (0x1040 + ((_n) << 2))
-+#define CHAN_QOS_MODE_MASK(_n)                GENMASK(2 + ((_n) << 2), (_n) << 2)
-+
-+#define REG_GLB_TRTCM_CFG             0x1080
-+#define GLB_TRTCM_EN_MASK             BIT(31)
-+#define GLB_TRTCM_MODE_MASK           BIT(30)
-+#define GLB_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define GLB_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+#define REG_TXQ_CNGST_CFG             0x10a0
-+#define TXQ_CNGST_DROP_EN             BIT(31)
-+#define TXQ_CNGST_DEI_DROP_EN         BIT(30)
-+
-+#define REG_SLA_TRTCM_CFG             0x1150
-+#define SLA_TRTCM_EN_MASK             BIT(31)
-+#define SLA_TRTCM_MODE_MASK           BIT(30)
-+#define SLA_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define SLA_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+/* CTRL */
-+#define QDMA_DESC_DONE_MASK           BIT(31)
-+#define QDMA_DESC_DROP_MASK           BIT(30) /* tx: drop - rx: overflow */
-+#define QDMA_DESC_MORE_MASK           BIT(29) /* more SG elements */
-+#define QDMA_DESC_DEI_MASK            BIT(25)
-+#define QDMA_DESC_NO_DROP_MASK                BIT(24)
-+#define QDMA_DESC_LEN_MASK            GENMASK(15, 0)
-+/* DATA */
-+#define QDMA_DESC_NEXT_ID_MASK                GENMASK(15, 0)
-+/* TX MSG0 */
-+#define QDMA_ETH_TXMSG_MIC_IDX_MASK   BIT(30)
-+#define QDMA_ETH_TXMSG_SP_TAG_MASK    GENMASK(29, 14)
-+#define QDMA_ETH_TXMSG_ICO_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_UCO_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_TCO_MASK               BIT(11)
-+#define QDMA_ETH_TXMSG_TSO_MASK               BIT(10)
-+#define QDMA_ETH_TXMSG_FAST_MASK      BIT(9)
-+#define QDMA_ETH_TXMSG_OAM_MASK               BIT(8)
-+#define QDMA_ETH_TXMSG_CHAN_MASK      GENMASK(7, 3)
-+#define QDMA_ETH_TXMSG_QUEUE_MASK     GENMASK(2, 0)
-+/* TX MSG1 */
-+#define QDMA_ETH_TXMSG_NO_DROP                BIT(31)
-+#define QDMA_ETH_TXMSG_METER_MASK     GENMASK(30, 24) /* 0x7f no meters */
-+#define QDMA_ETH_TXMSG_FPORT_MASK     GENMASK(23, 20)
-+#define QDMA_ETH_TXMSG_NBOQ_MASK      GENMASK(19, 15)
-+#define QDMA_ETH_TXMSG_HWF_MASK               BIT(14)
-+#define QDMA_ETH_TXMSG_HOP_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_PTP_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
-+#define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
-+
-+/* RX MSG1 */
-+#define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
-+#define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
-+#define QDMA_ETH_RXMSG_IP4_MASK               BIT(29)
-+#define QDMA_ETH_RXMSG_IP4F_MASK      BIT(28)
-+#define QDMA_ETH_RXMSG_L4_VALID_MASK  BIT(27)
-+#define QDMA_ETH_RXMSG_L4F_MASK               BIT(26)
-+#define QDMA_ETH_RXMSG_SPORT_MASK     GENMASK(25, 21)
-+#define QDMA_ETH_RXMSG_CRSN_MASK      GENMASK(20, 16)
-+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-+
-+struct airoha_qdma_desc {
-+      __le32 rsv;
-+      __le32 ctrl;
-+      __le32 addr;
-+      __le32 data;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 msg2;
-+      __le32 msg3;
-+};
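The CTRL/DATA/MSG words above are plain 32-bit values packed and unpacked with GENMASK()/FIELD_PREP()/FIELD_GET() from <linux/bits.h> and <linux/bitfield.h>. A minimal user-space sketch of the same packing, using simplified stand-ins for those helpers and a hypothetical frame length:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel GENMASK/FIELD_PREP/FIELD_GET helpers */
#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)   (((r) & (m)) >> __builtin_ctz(m))

#define QDMA_DESC_DONE_MASK  (1u << 31)
#define QDMA_DESC_LEN_MASK   GENMASK(15, 0)

int main(void)
{
    /* pack a hypothetical 1514-byte frame length into the ctrl word */
    uint32_t ctrl = FIELD_PREP(QDMA_DESC_LEN_MASK, 1514);

    /* hardware sets DONE on completion; unpack both fields back */
    ctrl |= QDMA_DESC_DONE_MASK;
    printf("done=%u len=%u\n",
           !!(ctrl & QDMA_DESC_DONE_MASK),
           (unsigned int)FIELD_GET(QDMA_DESC_LEN_MASK, ctrl));
    return 0;
}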
-+
-+/* CTRL0 */
-+#define QDMA_FWD_DESC_CTX_MASK                BIT(31)
-+#define QDMA_FWD_DESC_RING_MASK               GENMASK(30, 28)
-+#define QDMA_FWD_DESC_IDX_MASK                GENMASK(27, 16)
-+#define QDMA_FWD_DESC_LEN_MASK                GENMASK(15, 0)
-+/* CTRL1 */
-+#define QDMA_FWD_DESC_FIRST_IDX_MASK  GENMASK(15, 0)
-+/* CTRL2 */
-+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK       GENMASK(2, 0)
-+
-+struct airoha_qdma_fwd_desc {
-+      __le32 addr;
-+      __le32 ctrl0;
-+      __le32 ctrl1;
-+      __le32 ctrl2;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 rsv0;
-+      __le32 rsv1;
-+};
-+
-+enum {
-+      QDMA_INT_REG_IDX0,
-+      QDMA_INT_REG_IDX1,
-+      QDMA_INT_REG_IDX2,
-+      QDMA_INT_REG_IDX3,
-+      QDMA_INT_REG_IDX4,
-+      QDMA_INT_REG_MAX
-+};
-+
-+enum {
-+      XSI_PCIE0_PORT,
-+      XSI_PCIE1_PORT,
-+      XSI_USB_PORT,
-+      XSI_AE_PORT,
-+      XSI_ETH_PORT,
-+};
-+
-+enum {
-+      XSI_PCIE0_VIP_PORT_MASK = BIT(22),
-+      XSI_PCIE1_VIP_PORT_MASK = BIT(23),
-+      XSI_USB_VIP_PORT_MASK   = BIT(25),
-+      XSI_ETH_VIP_PORT_MASK   = BIT(24),
-+};
-+
-+enum {
-+      DEV_STATE_INITIALIZED,
-+};
-+
-+enum {
-+      CDM_CRSN_QSEL_Q1 = 1,
-+      CDM_CRSN_QSEL_Q5 = 5,
-+      CDM_CRSN_QSEL_Q6 = 6,
-+      CDM_CRSN_QSEL_Q15 = 15,
-+};
-+
-+enum {
-+      CRSN_08 = 0x8,
-+      CRSN_21 = 0x15, /* KA */
-+      CRSN_22 = 0x16, /* hit bind and force route to CPU */
-+      CRSN_24 = 0x18,
-+      CRSN_25 = 0x19,
-+};
-+
-+enum {
-+      FE_PSE_PORT_CDM1,
-+      FE_PSE_PORT_GDM1,
-+      FE_PSE_PORT_GDM2,
-+      FE_PSE_PORT_GDM3,
-+      FE_PSE_PORT_PPE1,
-+      FE_PSE_PORT_CDM2,
-+      FE_PSE_PORT_CDM3,
-+      FE_PSE_PORT_CDM4,
-+      FE_PSE_PORT_PPE2,
-+      FE_PSE_PORT_GDM4,
-+      FE_PSE_PORT_CDM5,
-+      FE_PSE_PORT_DROP = 0xf,
-+};
-+
-+enum tx_sched_mode {
-+      TC_SCH_WRR8,
-+      TC_SCH_SP,
-+      TC_SCH_WRR7,
-+      TC_SCH_WRR6,
-+      TC_SCH_WRR5,
-+      TC_SCH_WRR4,
-+      TC_SCH_WRR3,
-+      TC_SCH_WRR2,
-+};
-+
-+enum trtcm_param_type {
-+      TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
-+      TRTCM_TOKEN_RATE_MODE,
-+      TRTCM_BUCKETSIZE_SHIFT_MODE,
-+      TRTCM_BUCKET_COUNTER_MODE,
-+};
-+
-+enum trtcm_mode_type {
-+      TRTCM_COMMIT_MODE,
-+      TRTCM_PEAK_MODE,
-+};
-+
-+enum trtcm_param {
-+      TRTCM_TICK_SEL = BIT(0),
-+      TRTCM_PKT_MODE = BIT(1),
-+      TRTCM_METER_MODE = BIT(2),
-+};
-+
-+#define MIN_TOKEN_SIZE                                4096
-+#define MAX_TOKEN_SIZE_OFFSET                 17
-+#define TRTCM_TOKEN_RATE_MASK                 GENMASK(23, 6)
-+#define TRTCM_TOKEN_RATE_FRACTION_MASK                GENMASK(5, 0)
-+
-+struct airoha_queue_entry {
-+      union {
-+              void *buf;
-+              struct sk_buff *skb;
-+      };
-+      dma_addr_t dma_addr;
-+      u16 dma_len;
-+};
-+
-+struct airoha_queue {
-+      struct airoha_qdma *qdma;
-+
-+      /* protect concurrent queue accesses */
-+      spinlock_t lock;
-+      struct airoha_queue_entry *entry;
-+      struct airoha_qdma_desc *desc;
-+      u16 head;
-+      u16 tail;
-+
-+      int queued;
-+      int ndesc;
-+      int free_thr;
-+      int buf_size;
-+
-+      struct napi_struct napi;
-+      struct page_pool *page_pool;
-+};
-+
-+struct airoha_tx_irq_queue {
-+      struct airoha_qdma *qdma;
-+
-+      struct napi_struct napi;
-+
-+      int size;
-+      u32 *q;
-+};
-+
-+struct airoha_hw_stats {
-+      /* protect concurrent hw_stats accesses */
-+      spinlock_t lock;
-+      struct u64_stats_sync syncp;
-+
-+      /* get_stats64 */
-+      u64 rx_ok_pkts;
-+      u64 tx_ok_pkts;
-+      u64 rx_ok_bytes;
-+      u64 tx_ok_bytes;
-+      u64 rx_multicast;
-+      u64 rx_errors;
-+      u64 rx_drops;
-+      u64 tx_drops;
-+      u64 rx_crc_error;
-+      u64 rx_over_errors;
-+      /* ethtool stats */
-+      u64 tx_broadcast;
-+      u64 tx_multicast;
-+      u64 tx_len[7];
-+      u64 rx_broadcast;
-+      u64 rx_fragment;
-+      u64 rx_jabber;
-+      u64 rx_len[7];
-+};
-+
-+struct airoha_qdma {
-+      struct airoha_eth *eth;
-+      void __iomem *regs;
-+
-+      /* protect concurrent irqmask accesses */
-+      spinlock_t irq_lock;
-+      u32 irqmask[QDMA_INT_REG_MAX];
-+      int irq;
-+
-+      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-+
-+      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-+      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-+
-+      /* descriptor and packet buffers for qdma hw forward */
-+      struct {
-+              void *desc;
-+              void *q;
-+      } hfwd;
-+};
-+
-+struct airoha_gdm_port {
-+      struct airoha_qdma *qdma;
-+      struct net_device *dev;
-+      int id;
-+
-+      struct airoha_hw_stats stats;
-+
-+      DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-+
-+      /* qos stats counters */
-+      u64 cpu_tx_packets;
-+      u64 fwd_tx_packets;
-+};
-+
-+struct airoha_eth {
-+      struct device *dev;
-+
-+      unsigned long state;
-+      void __iomem *fe_regs;
-+
-+      struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-+      struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-+
-+      struct net_device *napi_dev;
-+
-+      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-+      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-+};
-+
-+static u32 airoha_rr(void __iomem *base, u32 offset)
-+{
-+      return readl(base + offset);
-+}
-+
-+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
-+{
-+      writel(val, base + offset);
-+}
-+
-+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
-+{
-+      val |= (airoha_rr(base, offset) & ~mask);
-+      airoha_wr(base, offset, val);
-+
-+      return val;
-+}
-+
-+#define airoha_fe_rr(eth, offset)                             \
-+      airoha_rr((eth)->fe_regs, (offset))
-+#define airoha_fe_wr(eth, offset, val)                                \
-+      airoha_wr((eth)->fe_regs, (offset), (val))
-+#define airoha_fe_rmw(eth, offset, mask, val)                 \
-+      airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-+#define airoha_fe_set(eth, offset, val)                               \
-+      airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-+#define airoha_fe_clear(eth, offset, val)                     \
-+      airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-+
-+#define airoha_qdma_rr(qdma, offset)                          \
-+      airoha_rr((qdma)->regs, (offset))
-+#define airoha_qdma_wr(qdma, offset, val)                     \
-+      airoha_wr((qdma)->regs, (offset), (val))
-+#define airoha_qdma_rmw(qdma, offset, mask, val)              \
-+      airoha_rmw((qdma)->regs, (offset), (mask), (val))
-+#define airoha_qdma_set(qdma, offset, val)                    \
-+      airoha_rmw((qdma)->regs, (offset), 0, (val))
-+#define airoha_qdma_clear(qdma, offset, val)                  \
-+      airoha_rmw((qdma)->regs, (offset), (val), 0)
-+
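airoha_fe_set()/airoha_fe_clear() and the qdma variants are thin wrappers around the same read-modify-write helper, with either the clear mask or the set value left empty. A small sketch of that semantics over a plain variable standing in for an MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;    /* stands in for an MMIO register */

static uint32_t rmw(uint32_t *reg, uint32_t mask, uint32_t val)
{
    *reg = (*reg & ~mask) | val;    /* same logic as airoha_rmw() */
    return *reg;
}

int main(void)
{
    rmw(&fake_reg, 0, 1u << 3);       /* "set": clear nothing, OR in bit 3  */
    rmw(&fake_reg, 1u << 3, 0);       /* "clear": drop bit 3, OR in nothing */
    rmw(&fake_reg, 0xffu, 0x12);      /* classic rmw of an 8-bit field      */
    printf("reg=0x%08x\n", fake_reg); /* -> 0x00000012 */
    return 0;
}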
-+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
-+                                  u32 clear, u32 set)
-+{
-+      unsigned long flags;
-+
-+      if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
-+              return;
-+
-+      spin_lock_irqsave(&qdma->irq_lock, flags);
-+
-+      qdma->irqmask[index] &= ~clear;
-+      qdma->irqmask[index] |= set;
-+      airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
-+      /* Read irq_enable register in order to guarantee the update above
-+       * completes in the spinlock critical section.
-+       */
-+      airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
-+
-+      spin_unlock_irqrestore(&qdma->irq_lock, flags);
-+}
-+
-+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
-+                                 u32 mask)
-+{
-+      airoha_qdma_set_irqmask(qdma, index, 0, mask);
-+}
-+
-+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
-+                                  u32 mask)
-+{
-+      airoha_qdma_set_irqmask(qdma, index, mask, 0);
-+}
-+
-+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-+{
-+      /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
-+       * GDM{2,3,4} can be used as wan port connected to an external
-+       * phy module.
-+       */
-+      return port->id == 1;
-+}
-+
-+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      u32 val, reg;
-+
-+      reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
-+                                         : REG_FE_WAN_MAC_H;
-+      val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
-+      airoha_fe_wr(eth, reg, val);
-+
-+      val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
-+      airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
-+      airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
-+}
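airoha_set_macaddr() splits the 6-byte MAC into two 24-bit register values: bytes 0..2 go to the *_MAC_H register and bytes 3..5 are mirrored into the LMIN/LMAX registers. A user-space sketch with a hypothetical address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    /* high half: addr[0..2], as written to REG_FE_{LAN,WAN}_MAC_H */
    uint32_t hi = (addr[0] << 16) | (addr[1] << 8) | addr[2];
    /* low half: addr[3..5], mirrored into the LMIN and LMAX registers */
    uint32_t lo = (addr[3] << 16) | (addr[4] << 8) | addr[5];

    printf("hi=0x%06x lo=0x%06x\n", hi, lo); /* hi=0x001122 lo=0x334455 */
    return 0;
}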
-+
-+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
-+                                      u32 val)
-+{
-+      airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
-+                    FIELD_PREP(GDM_OCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
-+                    FIELD_PREP(GDM_MCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
-+                    FIELD_PREP(GDM_BCFQ_MASK, val));
-+      airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
-+                    FIELD_PREP(GDM_UCFQ_MASK, val));
-+}
-+
-+static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
-+{
-+      u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
-+      u32 vip_port, cfg_addr;
-+
-+      switch (port) {
-+      case XSI_PCIE0_PORT:
-+              vip_port = XSI_PCIE0_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(3);
-+              break;
-+      case XSI_PCIE1_PORT:
-+              vip_port = XSI_PCIE1_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(3);
-+              break;
-+      case XSI_USB_PORT:
-+              vip_port = XSI_USB_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(4);
-+              break;
-+      case XSI_ETH_PORT:
-+              vip_port = XSI_ETH_VIP_PORT_MASK;
-+              cfg_addr = REG_GDM_FWD_CFG(4);
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      if (enable) {
-+              airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
-+              airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
-+      } else {
-+              airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
-+              airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
-+      }
-+
-+      airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-+
-+      return 0;
-+}
-+
-+static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-+{
-+      const int port_list[] = {
-+              XSI_PCIE0_PORT,
-+              XSI_PCIE1_PORT,
-+              XSI_USB_PORT,
-+              XSI_ETH_PORT
-+      };
-+      int i, err;
-+
-+      for (i = 0; i < ARRAY_SIZE(port_list); i++) {
-+              err = airoha_set_gdm_port(eth, port_list[i], enable);
-+              if (err)
-+                      goto error;
-+      }
-+
-+      return 0;
-+
-+error:
-+      for (i--; i >= 0; i--)
-+              airoha_set_gdm_port(eth, port_list[i], false);
-+
-+      return err;
-+}
-+
-+static void airoha_fe_maccr_init(struct airoha_eth *eth)
-+{
-+      int p;
-+
-+      for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-+              airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
-+                            GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
-+                            GDM_DROP_CRC_ERR);
-+              airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
-+                                          FE_PSE_PORT_CDM1);
-+              airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
-+                            GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-+                            FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-+                            FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
-+      }
-+
-+      airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
-+                    FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
-+
-+      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
-+}
-+
-+static void airoha_fe_vip_setup(struct airoha_eth *eth)
-+{
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(4),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(6),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(7),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* BOOTP (0x43) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(8),
-+                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      /* BOOTP (0x44) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(9),
-+                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      /* ISAKMP */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(10),
-+                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(11),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* DHCPv6 */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(12),
-+                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-+                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(19),
-+                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-+                   PATN_EN_MASK);
-+
-+      /* ETH->ETH_P_1905 (0x893a) */
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(20),
-+                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+
-+      airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
-+      airoha_fe_wr(eth, REG_FE_VIP_EN(21),
-+                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
-+}
-+
-+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
-+                                           u32 port, u32 queue)
-+{
-+      u32 val;
-+
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-+                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
-+                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-+                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
-+      val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
-+
-+      return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
-+}
-+
-+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
-+                                            u32 port, u32 queue, u32 val)
-+{
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
-+                    FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
-+      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-+                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
-+                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
-+                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-+                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
-+                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
-+}
-+
-+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
-+{
-+      u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
-+
-+      return FIELD_GET(PSE_ALLRSV_MASK, val);
-+}
-+
-+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
-+                                  u32 port, u32 queue, u32 val)
-+{
-+      u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
-+      u32 tmp, all_rsv, fq_limit;
-+
-+      airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
-+
-+      /* modify all rsv */
-+      all_rsv = airoha_fe_get_pse_all_rsv(eth);
-+      all_rsv += (val - orig_val);
-+      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
-+                    FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
-+
-+      /* modify hthd */
-+      tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
-+      fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
-+      tmp = fq_limit - all_rsv - 0x20;
-+      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-+                    PSE_SHARE_USED_HTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
-+
-+      tmp = fq_limit - all_rsv - 0x100;
-+      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-+                    PSE_SHARE_USED_MTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
-+      tmp = (3 * tmp) >> 2;
-+      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
-+                    PSE_SHARE_USED_LTHD_MASK,
-+                    FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
-+
-+      return 0;
-+}
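The shared-buffer thresholds above are derived from the free-queue limit minus the total reserved pages: high = fq_limit - all_rsv - 0x20, mid = fq_limit - all_rsv - 0x100, low = three quarters of mid. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
    /* hypothetical values; fq_limit comes from PSE_FQ_CFG on real hardware */
    unsigned int fq_limit = 0x1000, all_rsv = 0x120;

    unsigned int hthd = fq_limit - all_rsv - 0x20;   /* 0xec0 */
    unsigned int mthd = fq_limit - all_rsv - 0x100;  /* 0xde0 */
    unsigned int lthd = (3 * mthd) >> 2;             /* 0xa68 */

    printf("hthd=0x%x mthd=0x%x lthd=0x%x\n", hthd, mthd, lthd);
    return 0;
}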
-+
-+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
-+{
-+      const u32 pse_port_num_queues[] = {
-+              [FE_PSE_PORT_CDM1] = 6,
-+              [FE_PSE_PORT_GDM1] = 6,
-+              [FE_PSE_PORT_GDM2] = 32,
-+              [FE_PSE_PORT_GDM3] = 6,
-+              [FE_PSE_PORT_PPE1] = 4,
-+              [FE_PSE_PORT_CDM2] = 6,
-+              [FE_PSE_PORT_CDM3] = 8,
-+              [FE_PSE_PORT_CDM4] = 10,
-+              [FE_PSE_PORT_PPE2] = 4,
-+              [FE_PSE_PORT_GDM4] = 2,
-+              [FE_PSE_PORT_CDM5] = 2,
-+      };
-+      u32 all_rsv;
-+      int q;
-+
-+      all_rsv = airoha_fe_get_pse_all_rsv(eth);
-+      /* hw misses PPE2 oq rsv */
-+      all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
-+      airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
-+
-+      /* CMD1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* GMD1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* GMD2 */
-+      for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
-+      /* GMD3 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* PPE1 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
-+              if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
-+                                               PSE_QUEUE_RSV_PAGES);
-+              else
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
-+      }
-+      /* CDM2 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* CDM3 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
-+      /* CDM4 */
-+      for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* PPE2 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
-+              if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
-+                                               PSE_QUEUE_RSV_PAGES);
-+              else
-+                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
-+      }
-+      /* GMD4 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+      /* CDM5 */
-+      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
-+              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
-+                                       PSE_QUEUE_RSV_PAGES);
-+}
-+
-+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
-+              int err, j;
-+              u32 val;
-+
-+              airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-+
-+              val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-+                    MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
-+              airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-+              err = read_poll_timeout(airoha_fe_rr, val,
-+                                      val & MC_VLAN_CFG_CMD_DONE_MASK,
-+                                      USEC_PER_MSEC, 5 * USEC_PER_MSEC,
-+                                      false, eth, REG_MC_VLAN_CFG);
-+              if (err)
-+                      return err;
-+
-+              for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
-+                      airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-+
-+                      val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-+                            FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
-+                            MC_VLAN_CFG_RW_MASK;
-+                      airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-+                      err = read_poll_timeout(airoha_fe_rr, val,
-+                                              val & MC_VLAN_CFG_CMD_DONE_MASK,
-+                                              USEC_PER_MSEC,
-+                                              5 * USEC_PER_MSEC, false, eth,
-+                                              REG_MC_VLAN_CFG);
-+                      if (err)
-+                              return err;
-+              }
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
-+{
-+      /* CDM1_CRSN_QSEL */
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                               CDM_CRSN_QSEL_Q6));
-+      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
-+                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                               CDM_CRSN_QSEL_Q1));
-+      /* CDM2_CRSN_QSEL */
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-+                               CDM_CRSN_QSEL_Q1));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-+                               CDM_CRSN_QSEL_Q6));
-+      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
-+                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-+                               CDM_CRSN_QSEL_Q1));
-+}
-+
-+static int airoha_fe_init(struct airoha_eth *eth)
-+{
-+      airoha_fe_maccr_init(eth);
-+
-+      /* PSE IQ reserve */
-+      airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
-+                    FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
-+      airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
-+                    PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
-+                    FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
-+                    FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
-+
-+      /* enable FE copy engine for MC/KA/DPI */
-+      airoha_fe_wr(eth, REG_FE_PCE_CFG,
-+                   PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
-+      /* set vip queue selection to ring 1 */
-+      airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
-+                    FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
-+      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
-+                    FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
-+      /* set GDM4 source interface offset to 8 */
-+      airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
-+                    GDM4_SPORT_OFF2_MASK |
-+                    GDM4_SPORT_OFF1_MASK |
-+                    GDM4_SPORT_OFF0_MASK,
-+                    FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
-+                    FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
-+                    FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
-+
-+      /* set PSE Page as 128B */
-+      airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
-+                    FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
-+                    FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
-+                    FE_DMA_GLO_PG_SZ_MASK);
-+      airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
-+                   FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
-+                   FE_RST_GDM4_MBI_ARB_MASK);
-+      usleep_range(1000, 2000);
-+
-+      /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
-+       * connect other rings to PSE Port0 OQ-0
-+       */
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
-+      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
-+
-+      airoha_fe_vip_setup(eth);
-+      airoha_fe_pse_ports_init(eth);
-+
-+      airoha_fe_set(eth, REG_GDM_MISC_CFG,
-+                    GDM2_RDM_ACK_WAIT_PREF_MASK |
-+                    GDM2_CHN_VLD_MODE_MASK);
-+      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
-+                    FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
-+
-+      /* init fragment and assemble Force Port */
-+      /* NPU Core-3, NPU Bridge Channel-3 */
-+      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-+                    IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
-+                    FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
-+                    FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
-+      /* QDMA LAN, RX Ring-22 */
-+      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-+                    IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
-+                    FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
-+                    FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
-+
-+      airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
-+      airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
-+
-+      airoha_fe_crsn_qsel_init(eth);
-+
-+      airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
-+      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
-+
-+      /* default aging mode for mbi unlock issue */
-+      airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
-+                    MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
-+                    FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
-+                    FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
-+
-+      /* disable IFC by default */
-+      airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
-+
-+      /* enable 1:N vlan action, init vlan table */
-+      airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
-+
-+      return airoha_fe_mc_vlan_clear(eth);
-+}
-+
-+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
-+{
-+      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_qdma *qdma = q->qdma;
-+      struct airoha_eth *eth = qdma->eth;
-+      int qid = q - &qdma->q_rx[0];
-+      int nframes = 0;
-+
-+      while (q->queued < q->ndesc - 1) {
-+              struct airoha_queue_entry *e = &q->entry[q->head];
-+              struct airoha_qdma_desc *desc = &q->desc[q->head];
-+              struct page *page;
-+              int offset;
-+              u32 val;
-+
-+              page = page_pool_dev_alloc_frag(q->page_pool, &offset,
-+                                              q->buf_size);
-+              if (!page)
-+                      break;
-+
-+              q->head = (q->head + 1) % q->ndesc;
-+              q->queued++;
-+              nframes++;
-+
-+              e->buf = page_address(page) + offset;
-+              e->dma_addr = page_pool_get_dma_addr(page) + offset;
-+              e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
-+
-+              dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
-+                                         dir);
-+
-+              val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
-+              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-+              WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
-+              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
-+              WRITE_ONCE(desc->data, cpu_to_le32(val));
-+              WRITE_ONCE(desc->msg0, 0);
-+              WRITE_ONCE(desc->msg1, 0);
-+              WRITE_ONCE(desc->msg2, 0);
-+              WRITE_ONCE(desc->msg3, 0);
-+
-+              airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
-+                              RX_RING_CPU_IDX_MASK,
-+                              FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-+      }
-+
-+      return nframes;
-+}
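The refill loop keeps at most ndesc - 1 descriptors posted and advances head modulo ndesc, with the descriptor's NEXT_ID field pointing at the new head. A tiny sketch of just the index arithmetic, using a hypothetical ring size:

#include <stdio.h>

int main(void)
{
    const int ndesc = 8;    /* hypothetical ring size */
    int head = 5, queued = 4, filled = 0;

    while (queued < ndesc - 1) {    /* same stop condition as the refill loop */
        head = (head + 1) % ndesc;
        queued++;
        filled++;
    }
    printf("head=%d queued=%d filled=%d\n", head, queued, filled); /* 0 7 3 */
    return 0;
}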
-+
-+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
-+                                  struct airoha_qdma_desc *desc)
-+{
-+      u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
-+
-+      sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
-+      switch (sport) {
-+      case 0x10 ... 0x13:
-+              port = 0;
-+              break;
-+      case 0x2 ... 0x4:
-+              port = sport - 1;
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
-+}
-+
-+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
-+{
-+      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-+      struct airoha_qdma *qdma = q->qdma;
-+      struct airoha_eth *eth = qdma->eth;
-+      int qid = q - &qdma->q_rx[0];
-+      int done = 0;
-+
-+      while (done < budget) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+              struct airoha_qdma_desc *desc = &q->desc[q->tail];
-+              dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-+              u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-+              struct sk_buff *skb;
-+              int len, p;
-+
-+              if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
-+                      break;
-+
-+              if (!dma_addr)
-+                      break;
-+
-+              len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-+              if (!len)
-+                      break;
-+
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+
-+              dma_sync_single_for_cpu(eth->dev, dma_addr,
-+                                      SKB_WITH_OVERHEAD(q->buf_size), dir);
-+
-+              p = airoha_qdma_get_gdm_port(eth, desc);
-+              if (p < 0 || !eth->ports[p]) {
-+                      page_pool_put_full_page(q->page_pool,
-+                                              virt_to_head_page(e->buf),
-+                                              true);
-+                      continue;
-+              }
-+
-+              skb = napi_build_skb(e->buf, q->buf_size);
-+              if (!skb) {
-+                      page_pool_put_full_page(q->page_pool,
-+                                              virt_to_head_page(e->buf),
-+                                              true);
-+                      break;
-+              }
-+
-+              skb_reserve(skb, 2);
-+              __skb_put(skb, len);
-+              skb_mark_for_recycle(skb);
-+              skb->dev = eth->ports[p]->dev;
-+              skb->protocol = eth_type_trans(skb, skb->dev);
-+              skb->ip_summed = CHECKSUM_UNNECESSARY;
-+              skb_record_rx_queue(skb, qid);
-+              napi_gro_receive(&q->napi, skb);
-+
-+              done++;
-+      }
-+      airoha_qdma_fill_rx_queue(q);
-+
-+      return done;
-+}
-+
-+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+      struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-+      int cur, done = 0;
-+
-+      do {
-+              cur = airoha_qdma_rx_process(q, budget - done);
-+              done += cur;
-+      } while (cur && done < budget);
-+
-+      if (done < budget && napi_complete(napi))
-+              airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
-+                                     RX_DONE_INT_MASK);
-+
-+      return done;
-+}
-+
-+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
-+                                   struct airoha_qdma *qdma, int ndesc)
-+{
-+      const struct page_pool_params pp_params = {
-+              .order = 0,
-+              .pool_size = 256,
-+              .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
-+                       PP_FLAG_PAGE_FRAG,
-+              .dma_dir = DMA_FROM_DEVICE,
-+              .max_len = PAGE_SIZE,
-+              .nid = NUMA_NO_NODE,
-+              .dev = qdma->eth->dev,
-+              .napi = &q->napi,
-+      };
-+      struct airoha_eth *eth = qdma->eth;
-+      int qid = q - &qdma->q_rx[0], thr;
-+      dma_addr_t dma_addr;
-+
-+      q->buf_size = PAGE_SIZE / 2;
-+      q->ndesc = ndesc;
-+      q->qdma = qdma;
-+
-+      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-+                              GFP_KERNEL);
-+      if (!q->entry)
-+              return -ENOMEM;
-+
-+      q->page_pool = page_pool_create(&pp_params);
-+      if (IS_ERR(q->page_pool)) {
-+              int err = PTR_ERR(q->page_pool);
-+
-+              q->page_pool = NULL;
-+              return err;
-+      }
-+
-+      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-+                                    &dma_addr, GFP_KERNEL);
-+      if (!q->desc)
-+              return -ENOMEM;
-+
-+      netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
-+
-+      airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
-+                      RX_RING_SIZE_MASK,
-+                      FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
-+
-+      thr = clamp(ndesc >> 3, 1, 32);
-+      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
-+                      FIELD_PREP(RX_RING_THR_MASK, thr));
-+      airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-+                      FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
-+
-+      airoha_qdma_fill_rx_queue(q);
-+
-+      return 0;
-+}
-+
-+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
-+{
-+      struct airoha_eth *eth = q->qdma->eth;
-+
-+      while (q->queued) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+              struct page *page = virt_to_head_page(e->buf);
-+
-+              dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
-+                                      page_pool_get_dma_dir(q->page_pool));
-+              page_pool_put_full_page(q->page_pool, page, false);
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+      }
-+}
-+
-+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              int err;
-+
-+              if (!(RX_DONE_INT_MASK & BIT(i))) {
-+                      /* rx-queue not bound to an irq */
-+                      continue;
-+              }
-+
-+              err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
-+                                              RX_DSCP_NUM(i));
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+      struct airoha_tx_irq_queue *irq_q;
-+      int id, done = 0, irq_queued;
-+      struct airoha_qdma *qdma;
-+      struct airoha_eth *eth;
-+      u32 status, head;
-+
-+      irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-+      qdma = irq_q->qdma;
-+      id = irq_q - &qdma->q_tx_irq[0];
-+      eth = qdma->eth;
-+
-+      status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
-+      head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
-+      head = head % irq_q->size;
-+      irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-+
-+      while (irq_queued > 0 && done < budget) {
-+              u32 qid, val = irq_q->q[head];
-+              struct airoha_qdma_desc *desc;
-+              struct airoha_queue_entry *e;
-+              struct airoha_queue *q;
-+              u32 index, desc_ctrl;
-+              struct sk_buff *skb;
-+
-+              if (val == 0xff)
-+                      break;
-+
-+              irq_q->q[head] = 0xff; /* mark as done */
-+              head = (head + 1) % irq_q->size;
-+              irq_queued--;
-+              done++;
-+
-+              qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
-+              if (qid >= ARRAY_SIZE(qdma->q_tx))
-+                      continue;
-+
-+              q = &qdma->q_tx[qid];
-+              if (!q->ndesc)
-+                      continue;
-+
-+              index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-+              if (index >= q->ndesc)
-+                      continue;
-+
-+              spin_lock_bh(&q->lock);
-+
-+              if (!q->queued)
-+                      goto unlock;
-+
-+              desc = &q->desc[index];
-+              desc_ctrl = le32_to_cpu(desc->ctrl);
-+
-+              if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
-+                  !(desc_ctrl & QDMA_DESC_DROP_MASK))
-+                      goto unlock;
-+
-+              e = &q->entry[index];
-+              skb = e->skb;
-+
-+              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-+                               DMA_TO_DEVICE);
-+              memset(e, 0, sizeof(*e));
-+              WRITE_ONCE(desc->msg0, 0);
-+              WRITE_ONCE(desc->msg1, 0);
-+              q->queued--;
-+
-+              /* completion ring can report out-of-order indexes if hw QoS
-+               * is enabled and packets with different priority are queued
-+               * to same DMA ring. Take into account possible out-of-order
-+               * reports incrementing DMA ring tail pointer
-+               */
-+              while (q->tail != q->head && !q->entry[q->tail].dma_addr)
-+                      q->tail = (q->tail + 1) % q->ndesc;
-+
-+              if (skb) {
-+                      u16 queue = skb_get_queue_mapping(skb);
-+                      struct netdev_queue *txq;
-+
-+                      txq = netdev_get_tx_queue(skb->dev, queue);
-+                      netdev_tx_completed_queue(txq, 1, skb->len);
-+                      if (netif_tx_queue_stopped(txq) &&
-+                          q->ndesc - q->queued >= q->free_thr)
-+                              netif_tx_wake_queue(txq);
-+
-+                      dev_kfree_skb_any(skb);
-+              }
-+unlock:
-+              spin_unlock_bh(&q->lock);
-+      }
-+
-+      if (done) {
-+              int i, len = done >> 7;
-+
-+              for (i = 0; i < len; i++)
-+                      airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-+                                      IRQ_CLEAR_LEN_MASK, 0x80);
-+              airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-+                              IRQ_CLEAR_LEN_MASK, (done & 0x7f));
-+      }
-+
-+      if (done < budget && napi_complete(napi))
-+              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
-+                                     TX_DONE_INT_MASK(id));
-+
-+      return done;
-+}
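Two details of the completion path are easy to miss: the tail pointer only advances over entries whose dma_addr has already been cleared, so out-of-order completions cannot move it past a still-pending slot, and the per-poll done count is acknowledged in chunks of 0x80 plus a remainder. A sketch of both with hypothetical values:

#include <stdio.h>

int main(void)
{
    /* ring of 8 slots; non-zero means the slot still owns a DMA mapping */
    unsigned long dma_addr[8] = { 0, 0, 0x1000, 0, 0x2000, 0, 0, 0 };
    int ndesc = 8, head = 6, tail = 0;

    /* advance tail over already-completed (cleared) slots only */
    while (tail != head && !dma_addr[tail])
        tail = (tail + 1) % ndesc;
    printf("tail=%d\n", tail);    /* stops at 2, the first pending slot */

    /* acknowledge 300 completions: two full 0x80 writes plus 0x2c */
    int done = 300;
    printf("full=%d remainder=0x%x\n", done >> 7, done & 0x7f);
    return 0;
}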
-+
-+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
-+                                   struct airoha_qdma *qdma, int size)
-+{
-+      struct airoha_eth *eth = qdma->eth;
-+      int i, qid = q - &qdma->q_tx[0];
-+      dma_addr_t dma_addr;
-+
-+      spin_lock_init(&q->lock);
-+      q->ndesc = size;
-+      q->qdma = qdma;
-+      q->free_thr = 1 + MAX_SKB_FRAGS;
-+
-+      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-+                              GFP_KERNEL);
-+      if (!q->entry)
-+              return -ENOMEM;
-+
-+      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-+                                    &dma_addr, GFP_KERNEL);
-+      if (!q->desc)
-+              return -ENOMEM;
-+
-+      for (i = 0; i < q->ndesc; i++) {
-+              u32 val;
-+
-+              val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
-+              WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
-+      }
-+
-+      /* xmit ring drop default setting */
-+      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
-+                      TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
-+
-+      airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-+                      FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-+      airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
-+                      FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
-+                                 struct airoha_qdma *qdma, int size)
-+{
-+      int id = irq_q - &qdma->q_tx_irq[0];
-+      struct airoha_eth *eth = qdma->eth;
-+      dma_addr_t dma_addr;
-+
-+      netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
-+                        airoha_qdma_tx_napi_poll);
-+      irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
-+                                     &dma_addr, GFP_KERNEL);
-+      if (!irq_q->q)
-+              return -ENOMEM;
-+
-+      memset(irq_q->q, 0xff, size * sizeof(u32));
-+      irq_q->size = size;
-+      irq_q->qdma = qdma;
-+
-+      airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
-+      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-+                      FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
-+      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
-+                      FIELD_PREP(TX_IRQ_THR_MASK, 1));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
-+{
-+      int i, err;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-+              err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
-+                                            IRQ_QUEUE_LEN(i));
-+              if (err)
-+                      return err;
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
-+                                              TX_DSCP_NUM);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
-+{
-+      struct airoha_eth *eth = q->qdma->eth;
-+
-+      spin_lock_bh(&q->lock);
-+      while (q->queued) {
-+              struct airoha_queue_entry *e = &q->entry[q->tail];
-+
-+              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-+                               DMA_TO_DEVICE);
-+              dev_kfree_skb_any(e->skb);
-+              e->skb = NULL;
-+
-+              q->tail = (q->tail + 1) % q->ndesc;
-+              q->queued--;
-+      }
-+      spin_unlock_bh(&q->lock);
-+}
-+
-+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
-+{
-+      struct airoha_eth *eth = qdma->eth;
-+      dma_addr_t dma_addr;
-+      u32 status;
-+      int size;
-+
-+      size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-+      qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                            GFP_KERNEL);
-+      if (!qdma->hfwd.desc)
-+              return -ENOMEM;
-+
-+      airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-+
-+      size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-+      qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                         GFP_KERNEL);
-+      if (!qdma->hfwd.q)
-+              return -ENOMEM;
-+
-+      airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
-+
-+      airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
-+                      HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-+                      FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-+      airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-+                      FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-+      airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
-+                      LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-+                      HW_FWD_DESC_NUM_MASK,
-+                      FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
-+                      LMGR_INIT_START);
-+
-+      return read_poll_timeout(airoha_qdma_rr, status,
-+                               !(status & LMGR_INIT_START), USEC_PER_MSEC,
-+                               30 * USEC_PER_MSEC, true, qdma,
-+                               REG_LMGR_INIT_CFG);
-+}
-+
-+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
-+{
-+      airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-+      airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
-+
-+      airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
-+                        PSE_BUF_ESTIMATE_EN_MASK);
-+
-+      airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_EN_MASK |
-+                      EGRESS_RATE_METER_EQ_RATE_EN_MASK);
-+      /* 2047us x 31 = 63.457ms */
-+      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_WINDOW_SZ_MASK,
-+                      FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
-+      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-+                      EGRESS_RATE_METER_TIMESLICE_MASK,
-+                      FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
-+
-+      /* ratelimit init */
-+      airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
-+      /* fast-tick 25us */
-+      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
-+                      FIELD_PREP(GLB_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
-+
-+      airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
-+                      FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
-+                      EGRESS_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
-+
-+      airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-+      airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
-+                        INGRESS_TRTCM_MODE_MASK);
-+      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
-+                      FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
-+      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
-+                      INGRESS_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
-+
-+      airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-+      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
-+                      FIELD_PREP(SLA_FAST_TICK_MASK, 25));
-+      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
-+                      FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
-+}
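The in-code comment "2047us x 31 = 63.457ms" is the egress meter window: TIMESLICE (0x7ff us) times WINDOW_SZ (0x1f slots). The trtcm blocks pair a fast tick with a slow-tick ratio in the same way, e.g. 25us x 40 = 1ms for the global/egress/SLA meters and 125us x 8 = 1ms for ingress. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
    printf("egress window = %u us\n", 0x7ffu * 0x1fu); /* 63457 us = 63.457 ms */
    printf("glb slow tick = %u us\n", 25u * 40u);       /* 1000 us */
    printf("ingress slow  = %u us\n", 125u * 8u);       /* 1000 us */
    return 0;
}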
-+
-+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
-+              /* Tx-cpu transferred count */
-+              airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
-+              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-+                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-+                             CNTR_ALL_DSCP_RING_EN_MASK |
-+                             FIELD_PREP(CNTR_CHAN_MASK, i));
-+              /* Tx-fwd transferred count */
-+              airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
-+              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-+                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-+                             CNTR_ALL_DSCP_RING_EN_MASK |
-+                             FIELD_PREP(CNTR_SRC_MASK, 1) |
-+                             FIELD_PREP(CNTR_CHAN_MASK, i));
-+      }
-+}
-+
-+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      /* clear pending irqs */
-+      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
-+              airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-+
-+      /* setup irqs */
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-+      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-+
-+      /* setup irq binding */
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-+                      continue;
-+
-+              if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-+                      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
-+                                      TX_RING_IRQ_BLOCKING_CFG_MASK);
-+              else
-+                      airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
-+                                        TX_RING_IRQ_BLOCKING_CFG_MASK);
-+      }
-+
-+      airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
-+                     GLOBAL_CFG_RX_2B_OFFSET_MASK |
-+                     FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
-+                     GLOBAL_CFG_CPU_TXR_RR_MASK |
-+                     GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
-+                     GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
-+                     GLOBAL_CFG_MULTICAST_EN_MASK |
-+                     GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
-+                     GLOBAL_CFG_TX_WB_DONE_MASK |
-+                     FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
-+
-+      airoha_qdma_init_qos(qdma);
-+
-+      /* disable qdma rx delay interrupt */
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
-+                                RX_DELAY_INT_MASK);
-+      }
-+
-+      airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
-+                      TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
-+      airoha_qdma_init_qos_stats(qdma);
-+
-+      return 0;
-+}
-+
-+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
-+{
-+      struct airoha_qdma *qdma = dev_instance;
-+      u32 intr[ARRAY_SIZE(qdma->irqmask)];
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
-+              intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
-+              intr[i] &= qdma->irqmask[i];
-+              airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-+      }
-+
-+      if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
-+              return IRQ_NONE;
-+
-+      if (intr[1] & RX_DONE_INT_MASK) {
-+              airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
-+                                      RX_DONE_INT_MASK);
-+
-+              for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+                      if (!qdma->q_rx[i].ndesc)
-+                              continue;
-+
-+                      if (intr[1] & BIT(i))
-+                              napi_schedule(&qdma->q_rx[i].napi);
-+              }
-+      }
-+
-+      if (intr[0] & INT_TX_MASK) {
-+              for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-+                      if (!(intr[0] & TX_DONE_INT_MASK(i)))
-+                              continue;
-+
-+                      airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
-+                                              TX_DONE_INT_MASK(i));
-+                      napi_schedule(&qdma->q_tx_irq[i].napi);
-+              }
-+      }
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static int airoha_qdma_init(struct platform_device *pdev,
-+                          struct airoha_eth *eth,
-+                          struct airoha_qdma *qdma)
-+{
-+      int err, id = qdma - &eth->qdma[0];
-+      const char *res;
-+
-+      spin_lock_init(&qdma->irq_lock);
-+      qdma->eth = eth;
-+
-+      res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
-+      if (!res)
-+              return -ENOMEM;
-+
-+      qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
-+      if (IS_ERR(qdma->regs))
-+              return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
-+                                   "failed to iomap qdma%d regs\n", id);
-+
-+      qdma->irq = platform_get_irq(pdev, 4 * id);
-+      if (qdma->irq < 0)
-+              return qdma->irq;
-+
-+      err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
-+                             IRQF_SHARED, KBUILD_MODNAME, qdma);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_rx(qdma);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_tx(qdma);
-+      if (err)
-+              return err;
-+
-+      err = airoha_qdma_init_hfwd_queues(qdma);
-+      if (err)
-+              return err;
-+
-+      return airoha_qdma_hw_init(qdma);
-+}
-+
-+static int airoha_hw_init(struct platform_device *pdev,
-+                        struct airoha_eth *eth)
-+{
-+      int err, i;
-+
-+      /* disable xsi */
-+      err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
-+                                      eth->xsi_rsts);
-+      if (err)
-+              return err;
-+
-+      err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      if (err)
-+              return err;
-+
-+      msleep(20);
-+      err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
-+      if (err)
-+              return err;
-+
-+      msleep(20);
-+      err = airoha_fe_init(eth);
-+      if (err)
-+              return err;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-+              err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
-+              if (err)
-+                      return err;
-+      }
-+
-+      set_bit(DEV_STATE_INITIALIZED, &eth->state);
-+
-+      return 0;
-+}
-+
-+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              netif_napi_del(&qdma->q_rx[i].napi);
-+              airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
-+              if (qdma->q_rx[i].page_pool)
-+                      page_pool_destroy(qdma->q_rx[i].page_pool);
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-+              netif_napi_del(&qdma->q_tx_irq[i].napi);
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-+      }
-+}
-+
-+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-+              napi_enable(&qdma->q_tx_irq[i].napi);
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              napi_enable(&qdma->q_rx[i].napi);
-+      }
-+}
-+
-+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-+              napi_disable(&qdma->q_tx_irq[i].napi);
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              napi_disable(&qdma->q_rx[i].napi);
-+      }
-+}
-+
-+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      u32 val, i = 0;
-+
-+      spin_lock(&port->stats.lock);
-+      u64_stats_update_begin(&port->stats.syncp);
-+
-+      /* TX */
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
-+      port->stats.tx_ok_pkts += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
-+      port->stats.tx_ok_pkts += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
-+      port->stats.tx_ok_bytes += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
-+      port->stats.tx_ok_bytes += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
-+      port->stats.tx_drops += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
-+      port->stats.tx_broadcast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
-+      port->stats.tx_multicast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
-+      port->stats.tx_len[i] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
-+      port->stats.tx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
-+      port->stats.tx_len[i++] += val;
-+
-+      /* RX */
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
-+      port->stats.rx_ok_pkts += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
-+      port->stats.rx_ok_pkts += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
-+      port->stats.rx_ok_bytes += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
-+      port->stats.rx_ok_bytes += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
-+      port->stats.rx_drops += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
-+      port->stats.rx_broadcast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
-+      port->stats.rx_multicast += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
-+      port->stats.rx_errors += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
-+      port->stats.rx_crc_error += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
-+      port->stats.rx_over_errors += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
-+      port->stats.rx_fragment += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
-+      port->stats.rx_jabber += val;
-+
-+      i = 0;
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
-+      port->stats.rx_len[i] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
-+      port->stats.rx_len[i] += ((u64)val << 32);
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
-+      port->stats.rx_len[i++] += val;
-+
-+      /* reset mib counters */
-+      airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
-+                    FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
-+
-+      u64_stats_update_end(&port->stats.syncp);
-+      spin_unlock(&port->stats.lock);
-+}
-+
-+static int airoha_dev_open(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_qdma *qdma = port->qdma;
-+      int err;
-+
-+      netif_tx_start_all_queues(dev);
-+      err = airoha_set_gdm_ports(qdma->eth, true);
-+      if (err)
-+              return err;
-+
-+      if (netdev_uses_dsa(dev))
-+              airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-+                            GDM_STAG_EN_MASK);
-+      else
-+              airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-+                              GDM_STAG_EN_MASK);
-+
-+      airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
-+                      GLOBAL_CFG_TX_DMA_EN_MASK |
-+                      GLOBAL_CFG_RX_DMA_EN_MASK);
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_stop(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_qdma *qdma = port->qdma;
-+      int i, err;
-+
-+      netif_tx_disable(dev);
-+      err = airoha_set_gdm_ports(qdma->eth, false);
-+      if (err)
-+              return err;
-+
-+      airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
-+                        GLOBAL_CFG_TX_DMA_EN_MASK |
-+                        GLOBAL_CFG_RX_DMA_EN_MASK);
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+              if (!qdma->q_tx[i].ndesc)
-+                      continue;
-+
-+              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-+              netdev_tx_reset_subqueue(dev, i);
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      int err;
-+
-+      err = eth_mac_addr(dev, p);
-+      if (err)
-+              return err;
-+
-+      airoha_set_macaddr(port, dev->dev_addr);
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_init(struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+
-+      airoha_set_macaddr(port, dev->dev_addr);
-+
-+      return 0;
-+}
-+
-+static void airoha_dev_get_stats64(struct net_device *dev,
-+                                 struct rtnl_link_stats64 *storage)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      unsigned int start;
-+
-+      airoha_update_hw_stats(port);
-+      do {
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              storage->rx_packets = port->stats.rx_ok_pkts;
-+              storage->tx_packets = port->stats.tx_ok_pkts;
-+              storage->rx_bytes = port->stats.rx_ok_bytes;
-+              storage->tx_bytes = port->stats.tx_ok_bytes;
-+              storage->multicast = port->stats.rx_multicast;
-+              storage->rx_errors = port->stats.rx_errors;
-+              storage->rx_dropped = port->stats.rx_drops;
-+              storage->tx_dropped = port->stats.tx_drops;
-+              storage->rx_crc_errors = port->stats.rx_crc_error;
-+              storage->rx_over_errors = port->stats.rx_over_errors;
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
-+                                 struct net_device *sb_dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      int queue, channel;
-+
-+      /* For dsa device select QoS channel according to the dsa user port
-+       * index, rely on port id otherwise. Select QoS queue based on the
-+       * skb priority.
-+       */
-+      channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
-+      channel = channel % AIROHA_NUM_QOS_CHANNELS;
-+      queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
-+      queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
-+
-+      return queue < dev->num_tx_queues ? queue : 0;
-+}
-+
-+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-+                                 struct net_device *dev)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
-+      u32 msg0, msg1, len = skb_headlen(skb);
-+      struct airoha_qdma *qdma = port->qdma;
-+      struct netdev_queue *txq;
-+      struct airoha_queue *q;
-+      void *data = skb->data;
-+      int i, qid;
-+      u16 index;
-+      u8 fport;
-+
-+      qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
-+      msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
-+                        qid / AIROHA_NUM_QOS_QUEUES) |
-+             FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
-+                        qid % AIROHA_NUM_QOS_QUEUES);
-+      if (skb->ip_summed == CHECKSUM_PARTIAL)
-+              msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
-+                      FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
-+                      FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
-+
-+      /* TSO: fill MSS info in tcp checksum field */
-+      if (skb_is_gso(skb)) {
-+              if (skb_cow_head(skb, 0))
-+                      goto error;
-+
-+              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
-+                                               SKB_GSO_TCPV6)) {
-+                      __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
-+
-+                      tcp_hdr(skb)->check = (__force __sum16)csum;
-+                      msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
-+              }
-+      }
-+
-+      fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-+      msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
-+             FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
-+
-+      q = &qdma->q_tx[qid];
-+      if (WARN_ON_ONCE(!q->ndesc))
-+              goto error;
-+
-+      spin_lock_bh(&q->lock);
-+
-+      txq = netdev_get_tx_queue(dev, qid);
-+      if (q->queued + nr_frags > q->ndesc) {
-+              /* not enough space in the queue */
-+              netif_tx_stop_queue(txq);
-+              spin_unlock_bh(&q->lock);
-+              return NETDEV_TX_BUSY;
-+      }
-+
-+      index = q->head;
-+      for (i = 0; i < nr_frags; i++) {
-+              struct airoha_qdma_desc *desc = &q->desc[index];
-+              struct airoha_queue_entry *e = &q->entry[index];
-+              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-+              dma_addr_t addr;
-+              u32 val;
-+
-+              addr = dma_map_single(dev->dev.parent, data, len,
-+                                    DMA_TO_DEVICE);
-+              if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
-+                      goto error_unmap;
-+
-+              index = (index + 1) % q->ndesc;
-+
-+              val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
-+              if (i < nr_frags - 1)
-+                      val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
-+              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-+              WRITE_ONCE(desc->addr, cpu_to_le32(addr));
-+              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
-+              WRITE_ONCE(desc->data, cpu_to_le32(val));
-+              WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
-+              WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
-+              WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
-+
-+              e->skb = i ? NULL : skb;
-+              e->dma_addr = addr;
-+              e->dma_len = len;
-+
-+              data = skb_frag_address(frag);
-+              len = skb_frag_size(frag);
-+      }
-+
-+      q->head = index;
-+      q->queued += i;
-+
-+      skb_tx_timestamp(skb);
-+      netdev_tx_sent_queue(txq, skb->len);
-+
-+      if (netif_xmit_stopped(txq) || !netdev_xmit_more())
-+              airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
-+                              TX_RING_CPU_IDX_MASK,
-+                              FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-+
-+      if (q->ndesc - q->queued < q->free_thr)
-+              netif_tx_stop_queue(txq);
-+
-+      spin_unlock_bh(&q->lock);
-+
-+      return NETDEV_TX_OK;
-+
-+error_unmap:
-+      for (i--; i >= 0; i--) {
-+              index = (q->head + i) % q->ndesc;
-+              dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
-+                               q->entry[index].dma_len, DMA_TO_DEVICE);
-+      }
-+
-+      spin_unlock_bh(&q->lock);
-+error:
-+      dev_kfree_skb_any(skb);
-+      dev->stats.tx_dropped++;
-+
-+      return NETDEV_TX_OK;
-+}
-+
-+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
-+                                     struct ethtool_drvinfo *info)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->qdma->eth;
-+
-+      strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
-+      strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
-+}
-+
-+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
-+                                       struct ethtool_eth_mac_stats *stats)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      unsigned int start;
-+
-+      airoha_update_hw_stats(port);
-+      do {
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
-+              stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
-+              stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
-+      {    0,    64 },
-+      {   65,   127 },
-+      {  128,   255 },
-+      {  256,   511 },
-+      {  512,  1023 },
-+      { 1024,  1518 },
-+      { 1519, 10239 },
-+      {},
-+};
-+
-+static void
-+airoha_ethtool_get_rmon_stats(struct net_device *dev,
-+                            struct ethtool_rmon_stats *stats,
-+                            const struct ethtool_rmon_hist_range **ranges)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_hw_stats *hw_stats = &port->stats;
-+      unsigned int start;
-+
-+      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-+                   ARRAY_SIZE(hw_stats->tx_len) + 1);
-+      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-+                   ARRAY_SIZE(hw_stats->rx_len) + 1);
-+
-+      *ranges = airoha_ethtool_rmon_ranges;
-+      airoha_update_hw_stats(port);
-+      do {
-+              int i;
-+
-+              start = u64_stats_fetch_begin(&port->stats.syncp);
-+              stats->fragments = hw_stats->rx_fragment;
-+              stats->jabbers = hw_stats->rx_jabber;
-+              for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
-+                   i++) {
-+                      stats->hist[i] = hw_stats->rx_len[i];
-+                      stats->hist_tx[i] = hw_stats->tx_len[i];
-+              }
-+      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
-+}
-+
-+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
-+                                       int channel, enum tx_sched_mode mode,
-+                                       const u16 *weights, u8 n_weights)
-+{
-+      int i;
-+
-+      for (i = 0; i < AIROHA_NUM_TX_RING; i++)
-+              airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
-+                                TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
-+
-+      for (i = 0; i < n_weights; i++) {
-+              u32 status;
-+              int err;
-+
-+              airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
-+                             TWRR_RW_CMD_MASK |
-+                             FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
-+                             FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
-+                             FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
-+              err = read_poll_timeout(airoha_qdma_rr, status,
-+                                      status & TWRR_RW_CMD_DONE,
-+                                      USEC_PER_MSEC, 10 * USEC_PER_MSEC,
-+                                      true, port->qdma,
-+                                      REG_TXWRR_WEIGHT_CFG);
-+              if (err)
-+                      return err;
-+      }
-+
-+      airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
-+                      CHAN_QOS_MODE_MASK(channel),
-+                      mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
-+                                       int channel)
-+{
-+      static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-+
-+      return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
-+                                           ARRAY_SIZE(w));
-+}
-+
-+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
-+                                      int channel,
-+                                      struct tc_ets_qopt_offload *opt)
-+{
-+      struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
-+      enum tx_sched_mode mode = TC_SCH_SP;
-+      u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-+      int i, nstrict = 0, nwrr, qidx;
-+
-+      if (p->bands > AIROHA_NUM_QOS_QUEUES)
-+              return -EINVAL;
-+
-+      for (i = 0; i < p->bands; i++) {
-+              if (!p->quanta[i])
-+                      nstrict++;
-+      }
-+
-+      /* this configuration is not supported by the hw */
-+      if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
-+              return -EINVAL;
-+
-+      /* EN7581 SoC supports fixed QoS band priority where WRR queues have
-+       * lowest priorities with respect to SP ones.
-+       * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
-+       */
-+      nwrr = p->bands - nstrict;
-+      qidx = nstrict && nwrr ? nstrict : 0;
-+      for (i = 1; i <= p->bands; i++) {
-+              if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
-+                      return -EINVAL;
-+
-+              qidx = i == nwrr ? 0 : qidx + 1;
-+      }
-+
-+      for (i = 0; i < nwrr; i++)
-+              w[i] = p->weights[nstrict + i];
-+
-+      if (!nstrict)
-+              mode = TC_SCH_WRR8;
-+      else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
-+              mode = nstrict + 1;
-+
-+      return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
-+                                           ARRAY_SIZE(w));
-+}
-+
-+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
-+                                      int channel,
-+                                      struct tc_ets_qopt_offload *opt)
-+{
-+      u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
-+                                          REG_CNTR_VAL(channel << 1));
-+      u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
-+                                          REG_CNTR_VAL((channel << 1) + 1));
-+      u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
-+                       (fwd_tx_packets - port->fwd_tx_packets);
-+      _bstats_update(opt->stats.bstats, 0, tx_packets);
-+
-+      port->cpu_tx_packets = cpu_tx_packets;
-+      port->fwd_tx_packets = fwd_tx_packets;
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
-+                                   struct tc_ets_qopt_offload *opt)
-+{
-+      int channel = TC_H_MAJ(opt->handle) >> 16;
-+
-+      if (opt->parent == TC_H_ROOT)
-+              return -EINVAL;
-+
-+      switch (opt->command) {
-+      case TC_ETS_REPLACE:
-+              return airoha_qdma_set_tx_ets_sched(port, channel, opt);
-+      case TC_ETS_DESTROY:
-+              /* PRIO is default qdisc scheduler */
-+              return airoha_qdma_set_tx_prio_sched(port, channel);
-+      case TC_ETS_STATS:
-+              return airoha_qdma_get_tx_ets_stats(port, channel, opt);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
-+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
-+                                     u32 addr, enum trtcm_param_type param,
-+                                     enum trtcm_mode_type mode,
-+                                     u32 *val_low, u32 *val_high)
-+{
-+      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-+      u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-+                        FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-+                        FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-+                        FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+      if (read_poll_timeout(airoha_qdma_rr, val,
-+                            val & TRTCM_PARAM_RW_DONE_MASK,
-+                            USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-+                            qdma, REG_TRTCM_CFG_PARAM(addr)))
-+              return -ETIMEDOUT;
-+
-+      *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
-+      if (val_high)
-+              *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
-+                                     u32 addr, enum trtcm_param_type param,
-+                                     enum trtcm_mode_type mode, u32 val)
-+{
-+      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-+      u32 config = TRTCM_PARAM_RW_MASK |
-+                   FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-+                   FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-+                   FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-+                   FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+
-+      return read_poll_timeout(airoha_qdma_rr, val,
-+                               val & TRTCM_PARAM_RW_DONE_MASK,
-+                               USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-+                               qdma, REG_TRTCM_CFG_PARAM(addr));
-+}
-+
-+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
-+                                      u32 addr, enum trtcm_mode_type mode,
-+                                      bool enable, u32 enable_mask)
-+{
-+      u32 val;
-+
-+      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                      mode, &val, NULL))
-+              return -EINVAL;
-+
-+      val = enable ? val | enable_mask : val & ~enable_mask;
-+
-+      return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                         mode, val);
-+}
-+
-+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
-+                                            int channel, u32 addr,
-+                                            enum trtcm_mode_type mode,
-+                                            u32 rate_val, u32 bucket_size)
-+{
-+      u32 val, config, tick, unit, rate, rate_frac;
-+      int err;
-+
-+      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-+                                      mode, &config, NULL))
-+              return -EINVAL;
-+
-+      val = airoha_qdma_rr(qdma, addr);
-+      tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
-+      if (config & TRTCM_TICK_SEL)
-+              tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
-+      if (!tick)
-+              return -EINVAL;
-+
-+      unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
-+      if (!unit)
-+              return -EINVAL;
-+
-+      rate = rate_val / unit;
-+      rate_frac = rate_val % unit;
-+      rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
-+      rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
-+             FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
-+
-+      err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
-+                                        TRTCM_TOKEN_RATE_MODE, mode, rate);
-+      if (err)
-+              return err;
-+
-+      val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
-+      val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
-+
-+      return airoha_qdma_set_trtcm_param(qdma, channel, addr,
-+                                         TRTCM_BUCKETSIZE_SHIFT_MODE,
-+                                         mode, val);
-+}
-+
-+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
-+                                       int channel, u32 rate,
-+                                       u32 bucket_size)
-+{
-+      int i, err;
-+
-+      for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
-+              err = airoha_qdma_set_trtcm_config(port->qdma, channel,
-+                                                 REG_EGRESS_TRTCM_CFG, i,
-+                                                 !!rate, TRTCM_METER_MODE);
-+              if (err)
-+                      return err;
-+
-+              err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
-+                                                       REG_EGRESS_TRTCM_CFG,
-+                                                       i, rate, bucket_size);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
-+                                        struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+      u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
-+      struct net_device *dev = port->dev;
-+      int num_tx_queues = dev->real_num_tx_queues;
-+      int err;
-+
-+      if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
-+              return -EINVAL;
-+      }
-+
-+      err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
-+      if (err) {
-+              NL_SET_ERR_MSG_MOD(opt->extack,
-+                                 "failed configuring htb offload");
-+              return err;
-+      }
-+
-+      if (opt->command == TC_HTB_NODE_MODIFY)
-+              return 0;
-+
-+      err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
-+      if (err) {
-+              airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
-+              NL_SET_ERR_MSG_MOD(opt->extack,
-+                                 "failed setting real_num_tx_queues");
-+              return err;
-+      }
-+
-+      set_bit(channel, port->qos_sq_bmap);
-+      opt->qid = AIROHA_NUM_TX_RING + channel;
-+
-+      return 0;
-+}
-+
-+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
-+{
-+      struct net_device *dev = port->dev;
-+
-+      netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
-+      airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
-+      clear_bit(queue, port->qos_sq_bmap);
-+}
-+
-+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
-+                                         struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+
-+      if (!test_bit(channel, port->qos_sq_bmap)) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-+              return -EINVAL;
-+      }
-+
-+      airoha_tc_remove_htb_queue(port, channel);
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
-+{
-+      int q;
-+
-+      for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
-+              airoha_tc_remove_htb_queue(port, q);
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
-+                                          struct tc_htb_qopt_offload *opt)
-+{
-+      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-+
-+      if (!test_bit(channel, port->qos_sq_bmap)) {
-+              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-+              return -EINVAL;
-+      }
-+
-+      opt->qid = channel;
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
-+                                   struct tc_htb_qopt_offload *opt)
-+{
-+      switch (opt->command) {
-+      case TC_HTB_CREATE:
-+              break;
-+      case TC_HTB_DESTROY:
-+              return airoha_tc_htb_destroy(port);
-+      case TC_HTB_NODE_MODIFY:
-+      case TC_HTB_LEAF_ALLOC_QUEUE:
-+              return airoha_tc_htb_alloc_leaf_queue(port, opt);
-+      case TC_HTB_LEAF_DEL:
-+      case TC_HTB_LEAF_DEL_LAST:
-+      case TC_HTB_LEAF_DEL_LAST_FORCE:
-+              return airoha_tc_htb_delete_leaf_queue(port, opt);
-+      case TC_HTB_LEAF_QUERY_QUEUE:
-+              return airoha_tc_get_htb_get_leaf_queue(port, opt);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
-+                             void *type_data)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+
-+      switch (type) {
-+      case TC_SETUP_QDISC_ETS:
-+              return airoha_tc_setup_qdisc_ets(port, type_data);
-+      case TC_SETUP_QDISC_HTB:
-+              return airoha_tc_setup_qdisc_htb(port, type_data);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
-+static const struct net_device_ops airoha_netdev_ops = {
-+      .ndo_init               = airoha_dev_init,
-+      .ndo_open               = airoha_dev_open,
-+      .ndo_stop               = airoha_dev_stop,
-+      .ndo_select_queue       = airoha_dev_select_queue,
-+      .ndo_start_xmit         = airoha_dev_xmit,
-+      .ndo_get_stats64        = airoha_dev_get_stats64,
-+      .ndo_set_mac_address    = airoha_dev_set_macaddr,
-+      .ndo_setup_tc           = airoha_dev_tc_setup,
-+};
-+
-+static const struct ethtool_ops airoha_ethtool_ops = {
-+      .get_drvinfo            = airoha_ethtool_get_drvinfo,
-+      .get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
-+      .get_rmon_stats         = airoha_ethtool_get_rmon_stats,
-+};
-+
-+static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
-+{
-+      const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-+      struct airoha_gdm_port *port;
-+      struct airoha_qdma *qdma;
-+      struct net_device *dev;
-+      int err, index;
-+      u32 id;
-+
-+      if (!id_ptr) {
-+              dev_err(eth->dev, "missing gdm port id\n");
-+              return -EINVAL;
-+      }
-+
-+      id = be32_to_cpup(id_ptr);
-+      index = id - 1;
-+
-+      if (!id || id > ARRAY_SIZE(eth->ports)) {
-+              dev_err(eth->dev, "invalid gdm port id: %d\n", id);
-+              return -EINVAL;
-+      }
-+
-+      if (eth->ports[index]) {
-+              dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
-+              return -EINVAL;
-+      }
-+
-+      dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
-+                                    AIROHA_NUM_NETDEV_TX_RINGS,
-+                                    AIROHA_NUM_RX_RING);
-+      if (!dev) {
-+              dev_err(eth->dev, "alloc_etherdev failed\n");
-+              return -ENOMEM;
-+      }
-+
-+      qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
-+      dev->netdev_ops = &airoha_netdev_ops;
-+      dev->ethtool_ops = &airoha_ethtool_ops;
-+      dev->max_mtu = AIROHA_MAX_MTU;
-+      dev->watchdog_timeo = 5 * HZ;
-+      dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-+                         NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
-+                         NETIF_F_SG | NETIF_F_TSO |
-+                         NETIF_F_HW_TC;
-+      dev->features |= dev->hw_features;
-+      dev->dev.of_node = np;
-+      dev->irq = qdma->irq;
-+      SET_NETDEV_DEV(dev, eth->dev);
-+
-+      /* reserve hw queues for HTB offloading */
-+      err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
-+      if (err)
-+              return err;
-+
-+      err = of_get_ethdev_address(np, dev);
-+      if (err) {
-+              if (err == -EPROBE_DEFER)
-+                      return err;
-+
-+              eth_hw_addr_random(dev);
-+              dev_info(eth->dev, "generated random MAC address %pM\n",
-+                       dev->dev_addr);
-+      }
-+
-+      port = netdev_priv(dev);
-+      u64_stats_init(&port->stats.syncp);
-+      spin_lock_init(&port->stats.lock);
-+      port->qdma = qdma;
-+      port->dev = dev;
-+      port->id = id;
-+      eth->ports[index] = port;
-+
-+      return register_netdev(dev);
-+}
-+
-+static int airoha_probe(struct platform_device *pdev)
-+{
-+      struct device_node *np;
-+      struct airoha_eth *eth;
-+      int i, err;
-+
-+      eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-+      if (!eth)
-+              return -ENOMEM;
-+
-+      eth->dev = &pdev->dev;
-+
-+      err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
-+      if (err) {
-+              dev_err(eth->dev, "failed configuring DMA mask\n");
-+              return err;
-+      }
-+
-+      eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
-+      if (IS_ERR(eth->fe_regs))
-+              return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-+                                   "failed to iomap fe regs\n");
-+
-+      eth->rsts[0].id = "fe";
-+      eth->rsts[1].id = "pdma";
-+      eth->rsts[2].id = "qdma";
-+      err = devm_reset_control_bulk_get_exclusive(eth->dev,
-+                                                  ARRAY_SIZE(eth->rsts),
-+                                                  eth->rsts);
-+      if (err) {
-+              dev_err(eth->dev, "failed to get bulk reset lines\n");
-+              return err;
-+      }
-+
-+      eth->xsi_rsts[0].id = "xsi-mac";
-+      eth->xsi_rsts[1].id = "hsi0-mac";
-+      eth->xsi_rsts[2].id = "hsi1-mac";
-+      eth->xsi_rsts[3].id = "hsi-mac";
-+      eth->xsi_rsts[4].id = "xfp-mac";
-+      err = devm_reset_control_bulk_get_exclusive(eth->dev,
-+                                                  ARRAY_SIZE(eth->xsi_rsts),
-+                                                  eth->xsi_rsts);
-+      if (err) {
-+              dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
-+              return err;
-+      }
-+
-+      eth->napi_dev = alloc_netdev_dummy(0);
-+      if (!eth->napi_dev)
-+              return -ENOMEM;
-+
-+      /* Enable threaded NAPI by default */
-+      eth->napi_dev->threaded = true;
-+      strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
-+      platform_set_drvdata(pdev, eth);
-+
-+      err = airoha_hw_init(pdev, eth);
-+      if (err)
-+              goto error_hw_cleanup;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_qdma_start_napi(&eth->qdma[i]);
-+
-+      for_each_child_of_node(pdev->dev.of_node, np) {
-+              if (!of_device_is_compatible(np, "airoha,eth-mac"))
-+                      continue;
-+
-+              if (!of_device_is_available(np))
-+                      continue;
-+
-+              err = airoha_alloc_gdm_port(eth, np);
-+              if (err) {
-+                      of_node_put(np);
-+                      goto error_napi_stop;
-+              }
-+      }
-+
-+      return 0;
-+
-+error_napi_stop:
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_qdma_stop_napi(&eth->qdma[i]);
-+error_hw_cleanup:
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-+              airoha_hw_cleanup(&eth->qdma[i]);
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-+              struct airoha_gdm_port *port = eth->ports[i];
-+
-+              if (port && port->dev->reg_state == NETREG_REGISTERED)
-+                      unregister_netdev(port->dev);
-+      }
-+      free_netdev(eth->napi_dev);
-+      platform_set_drvdata(pdev, NULL);
-+
-+      return err;
-+}
-+
-+static void airoha_remove(struct platform_device *pdev)
-+{
-+      struct airoha_eth *eth = platform_get_drvdata(pdev);
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-+              airoha_qdma_stop_napi(&eth->qdma[i]);
-+              airoha_hw_cleanup(&eth->qdma[i]);
-+      }
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-+              struct airoha_gdm_port *port = eth->ports[i];
-+
-+              if (!port)
-+                      continue;
-+
-+              airoha_dev_stop(port->dev);
-+              unregister_netdev(port->dev);
-+      }
-+      free_netdev(eth->napi_dev);
-+
-+      platform_set_drvdata(pdev, NULL);
-+}
-+
-+static const struct of_device_id of_airoha_match[] = {
-+      { .compatible = "airoha,en7581-eth" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, of_airoha_match);
-+
-+static struct platform_driver airoha_driver = {
-+      .probe = airoha_probe,
-+      .remove_new = airoha_remove,
-+      .driver = {
-+              .name = KBUILD_MODNAME,
-+              .of_match_table = of_airoha_match,
-+      },
-+};
-+module_platform_driver(airoha_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
---- a/drivers/net/ethernet/mediatek/airoha_eth.c
-+++ /dev/null
-@@ -1,3359 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0-only
--/*
-- * Copyright (c) 2024 AIROHA Inc
-- * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-- */
--#include <linux/etherdevice.h>
--#include <linux/iopoll.h>
--#include <linux/kernel.h>
--#include <linux/netdevice.h>
--#include <linux/of.h>
--#include <linux/of_net.h>
--#include <linux/platform_device.h>
--#include <linux/reset.h>
--#include <linux/tcp.h>
--#include <linux/u64_stats_sync.h>
--#include <net/dsa.h>
--#include <net/page_pool/helpers.h>
--#include <net/pkt_cls.h>
--#include <uapi/linux/ppp_defs.h>
--
--#define AIROHA_MAX_NUM_GDM_PORTS      1
--#define AIROHA_MAX_NUM_QDMA           2
--#define AIROHA_MAX_NUM_RSTS           3
--#define AIROHA_MAX_NUM_XSI_RSTS               5
--#define AIROHA_MAX_MTU                        2000
--#define AIROHA_MAX_PACKET_SIZE                2048
--#define AIROHA_NUM_QOS_CHANNELS               4
--#define AIROHA_NUM_QOS_QUEUES         8
--#define AIROHA_NUM_TX_RING            32
--#define AIROHA_NUM_RX_RING            32
--#define AIROHA_NUM_NETDEV_TX_RINGS    (AIROHA_NUM_TX_RING + \
--                                       AIROHA_NUM_QOS_CHANNELS)
--#define AIROHA_FE_MC_MAX_VLAN_TABLE   64
--#define AIROHA_FE_MC_MAX_VLAN_PORT    16
--#define AIROHA_NUM_TX_IRQ             2
--#define HW_DSCP_NUM                   2048
--#define IRQ_QUEUE_LEN(_n)             ((_n) ? 1024 : 2048)
--#define TX_DSCP_NUM                   1024
--#define RX_DSCP_NUM(_n)                       \
--      ((_n) ==  2 ? 128 :             \
--       (_n) == 11 ? 128 :             \
--       (_n) == 15 ? 128 :             \
--       (_n) ==  0 ? 1024 : 16)
--
--#define PSE_RSV_PAGES                 128
--#define PSE_QUEUE_RSV_PAGES           64
--
--#define QDMA_METER_IDX(_n)            ((_n) & 0xff)
--#define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
--
--/* FE */
--#define PSE_BASE                      0x0100
--#define CSR_IFC_BASE                  0x0200
--#define CDM1_BASE                     0x0400
--#define GDM1_BASE                     0x0500
--#define PPE1_BASE                     0x0c00
--
--#define CDM2_BASE                     0x1400
--#define GDM2_BASE                     0x1500
--
--#define GDM3_BASE                     0x1100
--#define GDM4_BASE                     0x2500
--
--#define GDM_BASE(_n)                  \
--      ((_n) == 4 ? GDM4_BASE :        \
--       (_n) == 3 ? GDM3_BASE :        \
--       (_n) == 2 ? GDM2_BASE : GDM1_BASE)
--
--#define REG_FE_DMA_GLO_CFG            0x0000
--#define FE_DMA_GLO_L2_SPACE_MASK      GENMASK(7, 4)
--#define FE_DMA_GLO_PG_SZ_MASK         BIT(3)
--
--#define REG_FE_RST_GLO_CFG            0x0004
--#define FE_RST_GDM4_MBI_ARB_MASK      BIT(3)
--#define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
--#define FE_RST_CORE_MASK              BIT(0)
--
--#define REG_FE_WAN_MAC_H              0x0030
--#define REG_FE_LAN_MAC_H              0x0040
--
--#define REG_FE_MAC_LMIN(_n)           ((_n) + 0x04)
--#define REG_FE_MAC_LMAX(_n)           ((_n) + 0x08)
--
--#define REG_FE_CDM1_OQ_MAP0           0x0050
--#define REG_FE_CDM1_OQ_MAP1           0x0054
--#define REG_FE_CDM1_OQ_MAP2           0x0058
--#define REG_FE_CDM1_OQ_MAP3           0x005c
--
--#define REG_FE_PCE_CFG                        0x0070
--#define PCE_DPI_EN_MASK                       BIT(2)
--#define PCE_KA_EN_MASK                        BIT(1)
--#define PCE_MC_EN_MASK                        BIT(0)
--
--#define REG_FE_PSE_QUEUE_CFG_WR               0x0080
--#define PSE_CFG_PORT_ID_MASK          GENMASK(27, 24)
--#define PSE_CFG_QUEUE_ID_MASK         GENMASK(20, 16)
--#define PSE_CFG_WR_EN_MASK            BIT(8)
--#define PSE_CFG_OQRSV_SEL_MASK                BIT(0)
--
--#define REG_FE_PSE_QUEUE_CFG_VAL      0x0084
--#define PSE_CFG_OQ_RSV_MASK           GENMASK(13, 0)
--
--#define PSE_FQ_CFG                    0x008c
--#define PSE_FQ_LIMIT_MASK             GENMASK(14, 0)
--
--#define REG_FE_PSE_BUF_SET            0x0090
--#define PSE_SHARE_USED_LTHD_MASK      GENMASK(31, 16)
--#define PSE_ALLRSV_MASK                       GENMASK(14, 0)
--
--#define REG_PSE_SHARE_USED_THD                0x0094
--#define PSE_SHARE_USED_MTHD_MASK      GENMASK(31, 16)
--#define PSE_SHARE_USED_HTHD_MASK      GENMASK(15, 0)
--
--#define REG_GDM_MISC_CFG              0x0148
--#define GDM2_RDM_ACK_WAIT_PREF_MASK   BIT(9)
--#define GDM2_CHN_VLD_MODE_MASK                BIT(5)
--
--#define REG_FE_CSR_IFC_CFG            CSR_IFC_BASE
--#define FE_IFC_EN_MASK                        BIT(0)
--
--#define REG_FE_VIP_PORT_EN            0x01f0
--#define REG_FE_IFC_PORT_EN            0x01f4
--
--#define REG_PSE_IQ_REV1                       (PSE_BASE + 0x08)
--#define PSE_IQ_RES1_P2_MASK           GENMASK(23, 16)
--
--#define REG_PSE_IQ_REV2                       (PSE_BASE + 0x0c)
--#define PSE_IQ_RES2_P5_MASK           GENMASK(15, 8)
--#define PSE_IQ_RES2_P4_MASK           GENMASK(7, 0)
--
--#define REG_FE_VIP_EN(_n)             (0x0300 + ((_n) << 3))
--#define PATN_FCPU_EN_MASK             BIT(7)
--#define PATN_SWP_EN_MASK              BIT(6)
--#define PATN_DP_EN_MASK                       BIT(5)
--#define PATN_SP_EN_MASK                       BIT(4)
--#define PATN_TYPE_MASK                        GENMASK(3, 1)
--#define PATN_EN_MASK                  BIT(0)
--
--#define REG_FE_VIP_PATN(_n)           (0x0304 + ((_n) << 3))
--#define PATN_DP_MASK                  GENMASK(31, 16)
--#define PATN_SP_MASK                  GENMASK(15, 0)
--
--#define REG_CDM1_VLAN_CTRL            CDM1_BASE
--#define CDM1_VLAN_MASK                        GENMASK(31, 16)
--
--#define REG_CDM1_FWD_CFG              (CDM1_BASE + 0x08)
--#define CDM1_VIP_QSEL_MASK            GENMASK(24, 20)
--
--#define REG_CDM1_CRSN_QSEL(_n)                (CDM1_BASE + 0x10 + ((_n) << 2))
--#define CDM1_CRSN_QSEL_REASON_MASK(_n)        \
--      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
--
--#define REG_CDM2_FWD_CFG              (CDM2_BASE + 0x08)
--#define CDM2_OAM_QSEL_MASK            GENMASK(31, 27)
--#define CDM2_VIP_QSEL_MASK            GENMASK(24, 20)
--
--#define REG_CDM2_CRSN_QSEL(_n)                (CDM2_BASE + 0x10 + ((_n) << 2))
--#define CDM2_CRSN_QSEL_REASON_MASK(_n)        \
--      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
--
--#define REG_GDM_FWD_CFG(_n)           GDM_BASE(_n)
--#define GDM_DROP_CRC_ERR              BIT(23)
--#define GDM_IP4_CKSUM                 BIT(22)
--#define GDM_TCP_CKSUM                 BIT(21)
--#define GDM_UDP_CKSUM                 BIT(20)
--#define GDM_UCFQ_MASK                 GENMASK(15, 12)
--#define GDM_BCFQ_MASK                 GENMASK(11, 8)
--#define GDM_MCFQ_MASK                 GENMASK(7, 4)
--#define GDM_OCFQ_MASK                 GENMASK(3, 0)
--
--#define REG_GDM_INGRESS_CFG(_n)               (GDM_BASE(_n) + 0x10)
--#define GDM_INGRESS_FC_EN_MASK                BIT(1)
--#define GDM_STAG_EN_MASK              BIT(0)
--
--#define REG_GDM_LEN_CFG(_n)           (GDM_BASE(_n) + 0x14)
--#define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
--#define GDM_LONG_LEN_MASK             GENMASK(29, 16)
--
--#define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
--#define FE_CPORT_PAD                  BIT(26)
--#define FE_CPORT_PORT_XFC_MASK                BIT(25)
--#define FE_CPORT_QUEUE_XFC_MASK               BIT(24)
--
--#define REG_FE_GDM_MIB_CLEAR(_n)      (GDM_BASE(_n) + 0xf0)
--#define FE_GDM_MIB_RX_CLEAR_MASK      BIT(1)
--#define FE_GDM_MIB_TX_CLEAR_MASK      BIT(0)
--
--#define REG_FE_GDM1_MIB_CFG           (GDM1_BASE + 0xf4)
--#define FE_STRICT_RFC2819_MODE_MASK   BIT(31)
--#define FE_GDM1_TX_MIB_SPLIT_EN_MASK  BIT(17)
--#define FE_GDM1_RX_MIB_SPLIT_EN_MASK  BIT(16)
--#define FE_TX_MIB_ID_MASK             GENMASK(15, 8)
--#define FE_RX_MIB_ID_MASK             GENMASK(7, 0)
--
--#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x104)
--#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x10c)
--#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x110)
--#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x114)
--#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x118)
--#define REG_FE_GDM_TX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x11c)
--#define REG_FE_GDM_TX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x120)
--#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x124)
--#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x128)
--#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x12c)
--#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x130)
--#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x134)
--#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x138)
--#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x13c)
--#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x140)
--
--#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x148)
--#define REG_FE_GDM_RX_FC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x14c)
--#define REG_FE_GDM_RX_RC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x150)
--#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)   (GDM_BASE(_n) + 0x154)
--#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)      (GDM_BASE(_n) + 0x158)
--#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x15c)
--#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x160)
--#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x164)
--#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x168)
--#define REG_FE_GDM_RX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x16c)
--#define REG_FE_GDM_RX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x170)
--#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)     (GDM_BASE(_n) + 0x174)
--#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)                (GDM_BASE(_n) + 0x178)
--#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)      (GDM_BASE(_n) + 0x17c)
--#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x180)
--#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x184)
--#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x188)
--#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x18c)
--#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x190)
--#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x194)
--#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
--#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
--
--#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
--#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
--#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
--#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
--#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
--
--#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
--#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
--#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
--#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x28c)
--
--#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x290)
--#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x294)
--#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x298)
--#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x29c)
--#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2b8)
--#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2bc)
--#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2c0)
--#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2c4)
--#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2c8)
--#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2cc)
--#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2e8)
--#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2ec)
--#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2f0)
--#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2f4)
--#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2f8)
--#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
--
--#define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
--#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25)
--#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
--
--#define REG_GDM3_FWD_CFG              GDM3_BASE
--#define GDM3_PAD_EN_MASK              BIT(28)
--
--#define REG_GDM4_FWD_CFG              GDM4_BASE
--#define GDM4_PAD_EN_MASK              BIT(28)
--#define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
--
--#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c)
--#define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
--#define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
--#define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
--
--#define REG_IP_FRAG_FP                        0x2010
--#define IP_ASSEMBLE_PORT_MASK         GENMASK(24, 21)
--#define IP_ASSEMBLE_NBQ_MASK          GENMASK(20, 16)
--#define IP_FRAGMENT_PORT_MASK         GENMASK(8, 5)
--#define IP_FRAGMENT_NBQ_MASK          GENMASK(4, 0)
--
--#define REG_MC_VLAN_EN                        0x2100
--#define MC_VLAN_EN_MASK                       BIT(0)
--
--#define REG_MC_VLAN_CFG                       0x2104
--#define MC_VLAN_CFG_CMD_DONE_MASK     BIT(31)
--#define MC_VLAN_CFG_TABLE_ID_MASK     GENMASK(21, 16)
--#define MC_VLAN_CFG_PORT_ID_MASK      GENMASK(11, 8)
--#define MC_VLAN_CFG_TABLE_SEL_MASK    BIT(4)
--#define MC_VLAN_CFG_RW_MASK           BIT(0)
--
--#define REG_MC_VLAN_DATA              0x2108
--
--#define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
--
--/* QDMA */
--#define REG_QDMA_GLOBAL_CFG                   0x0004
--#define GLOBAL_CFG_RX_2B_OFFSET_MASK          BIT(31)
--#define GLOBAL_CFG_DMA_PREFERENCE_MASK                GENMASK(30, 29)
--#define GLOBAL_CFG_CPU_TXR_RR_MASK            BIT(28)
--#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK                BIT(27)
--#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK     BIT(26)
--#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK   BIT(25)
--#define GLOBAL_CFG_OAM_MODIFY_MASK            BIT(24)
--#define GLOBAL_CFG_RESET_MASK                 BIT(23)
--#define GLOBAL_CFG_RESET_DONE_MASK            BIT(22)
--#define GLOBAL_CFG_MULTICAST_EN_MASK          BIT(21)
--#define GLOBAL_CFG_IRQ1_EN_MASK                       BIT(20)
--#define GLOBAL_CFG_IRQ0_EN_MASK                       BIT(19)
--#define GLOBAL_CFG_LOOPCNT_EN_MASK            BIT(18)
--#define GLOBAL_CFG_RD_BYPASS_WR_MASK          BIT(17)
--#define GLOBAL_CFG_QDMA_LOOPBACK_MASK         BIT(16)
--#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK          GENMASK(13, 8)
--#define GLOBAL_CFG_CHECK_DONE_MASK            BIT(7)
--#define GLOBAL_CFG_TX_WB_DONE_MASK            BIT(6)
--#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK         GENMASK(5, 4)
--#define GLOBAL_CFG_RX_DMA_BUSY_MASK           BIT(3)
--#define GLOBAL_CFG_RX_DMA_EN_MASK             BIT(2)
--#define GLOBAL_CFG_TX_DMA_BUSY_MASK           BIT(1)
--#define GLOBAL_CFG_TX_DMA_EN_MASK             BIT(0)
--
--#define REG_FWD_DSCP_BASE                     0x0010
--#define REG_FWD_BUF_BASE                      0x0014
--
--#define REG_HW_FWD_DSCP_CFG                   0x0018
--#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK         GENMASK(29, 28)
--#define HW_FWD_DSCP_SCATTER_LEN_MASK          GENMASK(17, 16)
--#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK      GENMASK(15, 0)
--
--#define REG_INT_STATUS(_n)            \
--      (((_n) == 4) ? 0x0730 :         \
--       ((_n) == 3) ? 0x0724 :         \
--       ((_n) == 2) ? 0x0720 :         \
--       ((_n) == 1) ? 0x0024 : 0x0020)
--
--#define REG_INT_ENABLE(_n)            \
--      (((_n) == 4) ? 0x0750 :         \
--       ((_n) == 3) ? 0x0744 :         \
--       ((_n) == 2) ? 0x0740 :         \
--       ((_n) == 1) ? 0x002c : 0x0028)
--
--/* QDMA_CSR_INT_ENABLE1 */
--#define RX15_COHERENT_INT_MASK                BIT(31)
--#define RX14_COHERENT_INT_MASK                BIT(30)
--#define RX13_COHERENT_INT_MASK                BIT(29)
--#define RX12_COHERENT_INT_MASK                BIT(28)
--#define RX11_COHERENT_INT_MASK                BIT(27)
--#define RX10_COHERENT_INT_MASK                BIT(26)
--#define RX9_COHERENT_INT_MASK         BIT(25)
--#define RX8_COHERENT_INT_MASK         BIT(24)
--#define RX7_COHERENT_INT_MASK         BIT(23)
--#define RX6_COHERENT_INT_MASK         BIT(22)
--#define RX5_COHERENT_INT_MASK         BIT(21)
--#define RX4_COHERENT_INT_MASK         BIT(20)
--#define RX3_COHERENT_INT_MASK         BIT(19)
--#define RX2_COHERENT_INT_MASK         BIT(18)
--#define RX1_COHERENT_INT_MASK         BIT(17)
--#define RX0_COHERENT_INT_MASK         BIT(16)
--#define TX7_COHERENT_INT_MASK         BIT(15)
--#define TX6_COHERENT_INT_MASK         BIT(14)
--#define TX5_COHERENT_INT_MASK         BIT(13)
--#define TX4_COHERENT_INT_MASK         BIT(12)
--#define TX3_COHERENT_INT_MASK         BIT(11)
--#define TX2_COHERENT_INT_MASK         BIT(10)
--#define TX1_COHERENT_INT_MASK         BIT(9)
--#define TX0_COHERENT_INT_MASK         BIT(8)
--#define CNT_OVER_FLOW_INT_MASK                BIT(7)
--#define IRQ1_FULL_INT_MASK            BIT(5)
--#define IRQ1_INT_MASK                 BIT(4)
--#define HWFWD_DSCP_LOW_INT_MASK               BIT(3)
--#define HWFWD_DSCP_EMPTY_INT_MASK     BIT(2)
--#define IRQ0_FULL_INT_MASK            BIT(1)
--#define IRQ0_INT_MASK                 BIT(0)
--
--#define TX_DONE_INT_MASK(_n)                                  \
--      ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
--            : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
--
--#define INT_TX_MASK                                           \
--      (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
--       IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
--
--#define INT_IDX0_MASK                                         \
--      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
--       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
--       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
--       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
--       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
--       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
--       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
--       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
--       RX15_COHERENT_INT_MASK | INT_TX_MASK)
--
--/* QDMA_CSR_INT_ENABLE2 */
--#define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
--#define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
--#define RX13_NO_CPU_DSCP_INT_MASK     BIT(29)
--#define RX12_NO_CPU_DSCP_INT_MASK     BIT(28)
--#define RX11_NO_CPU_DSCP_INT_MASK     BIT(27)
--#define RX10_NO_CPU_DSCP_INT_MASK     BIT(26)
--#define RX9_NO_CPU_DSCP_INT_MASK      BIT(25)
--#define RX8_NO_CPU_DSCP_INT_MASK      BIT(24)
--#define RX7_NO_CPU_DSCP_INT_MASK      BIT(23)
--#define RX6_NO_CPU_DSCP_INT_MASK      BIT(22)
--#define RX5_NO_CPU_DSCP_INT_MASK      BIT(21)
--#define RX4_NO_CPU_DSCP_INT_MASK      BIT(20)
--#define RX3_NO_CPU_DSCP_INT_MASK      BIT(19)
--#define RX2_NO_CPU_DSCP_INT_MASK      BIT(18)
--#define RX1_NO_CPU_DSCP_INT_MASK      BIT(17)
--#define RX0_NO_CPU_DSCP_INT_MASK      BIT(16)
--#define RX15_DONE_INT_MASK            BIT(15)
--#define RX14_DONE_INT_MASK            BIT(14)
--#define RX13_DONE_INT_MASK            BIT(13)
--#define RX12_DONE_INT_MASK            BIT(12)
--#define RX11_DONE_INT_MASK            BIT(11)
--#define RX10_DONE_INT_MASK            BIT(10)
--#define RX9_DONE_INT_MASK             BIT(9)
--#define RX8_DONE_INT_MASK             BIT(8)
--#define RX7_DONE_INT_MASK             BIT(7)
--#define RX6_DONE_INT_MASK             BIT(6)
--#define RX5_DONE_INT_MASK             BIT(5)
--#define RX4_DONE_INT_MASK             BIT(4)
--#define RX3_DONE_INT_MASK             BIT(3)
--#define RX2_DONE_INT_MASK             BIT(2)
--#define RX1_DONE_INT_MASK             BIT(1)
--#define RX0_DONE_INT_MASK             BIT(0)
--
--#define RX_DONE_INT_MASK                                      \
--      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
--       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
--       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
--       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
--       RX15_DONE_INT_MASK)
--#define INT_IDX1_MASK                                         \
--      (RX_DONE_INT_MASK |                                     \
--       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
--       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
--       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
--       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
--       RX15_NO_CPU_DSCP_INT_MASK)
--
--/* QDMA_CSR_INT_ENABLE5 */
--#define TX31_COHERENT_INT_MASK                BIT(31)
--#define TX30_COHERENT_INT_MASK                BIT(30)
--#define TX29_COHERENT_INT_MASK                BIT(29)
--#define TX28_COHERENT_INT_MASK                BIT(28)
--#define TX27_COHERENT_INT_MASK                BIT(27)
--#define TX26_COHERENT_INT_MASK                BIT(26)
--#define TX25_COHERENT_INT_MASK                BIT(25)
--#define TX24_COHERENT_INT_MASK                BIT(24)
--#define TX23_COHERENT_INT_MASK                BIT(23)
--#define TX22_COHERENT_INT_MASK                BIT(22)
--#define TX21_COHERENT_INT_MASK                BIT(21)
--#define TX20_COHERENT_INT_MASK                BIT(20)
--#define TX19_COHERENT_INT_MASK                BIT(19)
--#define TX18_COHERENT_INT_MASK                BIT(18)
--#define TX17_COHERENT_INT_MASK                BIT(17)
--#define TX16_COHERENT_INT_MASK                BIT(16)
--#define TX15_COHERENT_INT_MASK                BIT(15)
--#define TX14_COHERENT_INT_MASK                BIT(14)
--#define TX13_COHERENT_INT_MASK                BIT(13)
--#define TX12_COHERENT_INT_MASK                BIT(12)
--#define TX11_COHERENT_INT_MASK                BIT(11)
--#define TX10_COHERENT_INT_MASK                BIT(10)
--#define TX9_COHERENT_INT_MASK         BIT(9)
--#define TX8_COHERENT_INT_MASK         BIT(8)
--
--#define INT_IDX4_MASK                                         \
--      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
--       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
--       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
--       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
--       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
--       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
--       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
--       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
--       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
--       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
--       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
--       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
--
--#define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
--
--#define REG_TX_IRQ_CFG(_n)            ((_n) ? 0x004c : 0x0054)
--#define TX_IRQ_THR_MASK                       GENMASK(27, 16)
--#define TX_IRQ_DEPTH_MASK             GENMASK(11, 0)
--
--#define REG_IRQ_CLEAR_LEN(_n)         ((_n) ? 0x0064 : 0x0058)
--#define IRQ_CLEAR_LEN_MASK            GENMASK(7, 0)
--
--#define REG_IRQ_STATUS(_n)            ((_n) ? 0x0068 : 0x005c)
--#define IRQ_ENTRY_LEN_MASK            GENMASK(27, 16)
--#define IRQ_HEAD_IDX_MASK             GENMASK(11, 0)
--
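--/* Per-ring TX/RX registers below use a 32-byte stride; TX rings 8-31 and
-- * RX rings 16 and above sit in a second register block, as the offsets
-- * encode.
-- */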
--#define REG_TX_RING_BASE(_n)  \
--      (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
--
--#define REG_TX_RING_BLOCKING(_n)      \
--      (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
--
--#define TX_RING_IRQ_BLOCKING_MAP_MASK                 BIT(6)
--#define TX_RING_IRQ_BLOCKING_CFG_MASK                 BIT(4)
--#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK          BIT(2)
--#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK    BIT(1)
--#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK    BIT(0)
--
--#define REG_TX_CPU_IDX(_n)    \
--      (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
--
--#define TX_RING_CPU_IDX_MASK          GENMASK(15, 0)
--
--#define REG_TX_DMA_IDX(_n)    \
--      (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
--
--#define TX_RING_DMA_IDX_MASK          GENMASK(15, 0)
--
--#define IRQ_RING_IDX_MASK             GENMASK(20, 16)
--#define IRQ_DESC_IDX_MASK             GENMASK(15, 0)
--
--#define REG_RX_RING_BASE(_n)  \
--      (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
--
--#define REG_RX_RING_SIZE(_n)  \
--      (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
--
--#define RX_RING_THR_MASK              GENMASK(31, 16)
--#define RX_RING_SIZE_MASK             GENMASK(15, 0)
--
--#define REG_RX_CPU_IDX(_n)    \
--      (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
--
--#define RX_RING_CPU_IDX_MASK          GENMASK(15, 0)
--
--#define REG_RX_DMA_IDX(_n)    \
--      (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
--
--#define REG_RX_DELAY_INT_IDX(_n)      \
--      (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
--
--#define RX_DELAY_INT_MASK             GENMASK(15, 0)
--
--#define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
--
--#define REG_INGRESS_TRTCM_CFG         0x0070
--#define INGRESS_TRTCM_EN_MASK         BIT(31)
--#define INGRESS_TRTCM_MODE_MASK               BIT(30)
--#define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
--#define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
--
--#define REG_QUEUE_CLOSE_CFG(_n)               (0x00a0 + ((_n) & 0xfc))
--#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)   BIT((_m) + (((_n) & 0x3) << 3))
--
--#define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
--#define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
--
--#define REG_CNTR_CFG(_n)              (0x0400 + ((_n) << 3))
--#define CNTR_EN_MASK                  BIT(31)
--#define CNTR_ALL_CHAN_EN_MASK         BIT(30)
--#define CNTR_ALL_QUEUE_EN_MASK                BIT(29)
--#define CNTR_ALL_DSCP_RING_EN_MASK    BIT(28)
--#define CNTR_SRC_MASK                 GENMASK(27, 24)
--#define CNTR_DSCP_RING_MASK           GENMASK(20, 16)
--#define CNTR_CHAN_MASK                        GENMASK(7, 3)
--#define CNTR_QUEUE_MASK                       GENMASK(2, 0)
--
--#define REG_CNTR_VAL(_n)              (0x0404 + ((_n) << 3))
--
--#define REG_LMGR_INIT_CFG             0x1000
--#define LMGR_INIT_START                       BIT(31)
--#define LMGR_SRAM_MODE_MASK           BIT(30)
--#define HW_FWD_PKTSIZE_OVERHEAD_MASK  GENMASK(27, 20)
--#define HW_FWD_DESC_NUM_MASK          GENMASK(16, 0)
--
--#define REG_FWD_DSCP_LOW_THR          0x1004
--#define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
--
--#define REG_EGRESS_RATE_METER_CFG             0x100c
--#define EGRESS_RATE_METER_EN_MASK             BIT(31)
--#define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
--#define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
--#define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
--
--#define REG_EGRESS_TRTCM_CFG          0x1010
--#define EGRESS_TRTCM_EN_MASK          BIT(31)
--#define EGRESS_TRTCM_MODE_MASK                BIT(30)
--#define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
--#define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
--
--#define TRTCM_PARAM_RW_MASK           BIT(31)
--#define TRTCM_PARAM_RW_DONE_MASK      BIT(30)
--#define TRTCM_PARAM_TYPE_MASK         GENMASK(29, 28)
--#define TRTCM_METER_GROUP_MASK                GENMASK(27, 26)
--#define TRTCM_PARAM_INDEX_MASK                GENMASK(23, 17)
--#define TRTCM_PARAM_RATE_TYPE_MASK    BIT(16)
--
--#define REG_TRTCM_CFG_PARAM(_n)               ((_n) + 0x4)
--#define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
--#define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
--
--#define REG_TXWRR_MODE_CFG            0x1020
--#define TWRR_WEIGHT_SCALE_MASK                BIT(31)
--#define TWRR_WEIGHT_BASE_MASK         BIT(3)
--
--#define REG_TXWRR_WEIGHT_CFG          0x1024
--#define TWRR_RW_CMD_MASK              BIT(31)
--#define TWRR_RW_CMD_DONE              BIT(30)
--#define TWRR_CHAN_IDX_MASK            GENMASK(23, 19)
--#define TWRR_QUEUE_IDX_MASK           GENMASK(18, 16)
--#define TWRR_VALUE_MASK                       GENMASK(15, 0)
--
--#define REG_PSE_BUF_USAGE_CFG         0x1028
--#define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
--
--#define REG_CHAN_QOS_MODE(_n)         (0x1040 + ((_n) << 2))
--#define CHAN_QOS_MODE_MASK(_n)                GENMASK(2 + ((_n) << 2), (_n) << 2)
--
--#define REG_GLB_TRTCM_CFG             0x1080
--#define GLB_TRTCM_EN_MASK             BIT(31)
--#define GLB_TRTCM_MODE_MASK           BIT(30)
--#define GLB_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
--#define GLB_FAST_TICK_MASK            GENMASK(15, 0)
--
--#define REG_TXQ_CNGST_CFG             0x10a0
--#define TXQ_CNGST_DROP_EN             BIT(31)
--#define TXQ_CNGST_DEI_DROP_EN         BIT(30)
--
--#define REG_SLA_TRTCM_CFG             0x1150
--#define SLA_TRTCM_EN_MASK             BIT(31)
--#define SLA_TRTCM_MODE_MASK           BIT(30)
--#define SLA_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
--#define SLA_FAST_TICK_MASK            GENMASK(15, 0)
--
--/* CTRL */
--#define QDMA_DESC_DONE_MASK           BIT(31)
--#define QDMA_DESC_DROP_MASK           BIT(30) /* tx: drop - rx: overflow */
--#define QDMA_DESC_MORE_MASK           BIT(29) /* more SG elements */
--#define QDMA_DESC_DEI_MASK            BIT(25)
--#define QDMA_DESC_NO_DROP_MASK                BIT(24)
--#define QDMA_DESC_LEN_MASK            GENMASK(15, 0)
--/* DATA */
--#define QDMA_DESC_NEXT_ID_MASK                GENMASK(15, 0)
--/* TX MSG0 */
--#define QDMA_ETH_TXMSG_MIC_IDX_MASK   BIT(30)
--#define QDMA_ETH_TXMSG_SP_TAG_MASK    GENMASK(29, 14)
--#define QDMA_ETH_TXMSG_ICO_MASK               BIT(13)
--#define QDMA_ETH_TXMSG_UCO_MASK               BIT(12)
--#define QDMA_ETH_TXMSG_TCO_MASK               BIT(11)
--#define QDMA_ETH_TXMSG_TSO_MASK               BIT(10)
--#define QDMA_ETH_TXMSG_FAST_MASK      BIT(9)
--#define QDMA_ETH_TXMSG_OAM_MASK               BIT(8)
--#define QDMA_ETH_TXMSG_CHAN_MASK      GENMASK(7, 3)
--#define QDMA_ETH_TXMSG_QUEUE_MASK     GENMASK(2, 0)
--/* TX MSG1 */
--#define QDMA_ETH_TXMSG_NO_DROP                BIT(31)
--#define QDMA_ETH_TXMSG_METER_MASK     GENMASK(30, 24) /* 0x7f no meters */
--#define QDMA_ETH_TXMSG_FPORT_MASK     GENMASK(23, 20)
--#define QDMA_ETH_TXMSG_NBOQ_MASK      GENMASK(19, 15)
--#define QDMA_ETH_TXMSG_HWF_MASK               BIT(14)
--#define QDMA_ETH_TXMSG_HOP_MASK               BIT(13)
--#define QDMA_ETH_TXMSG_PTP_MASK               BIT(12)
--#define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
--#define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
--
--/* RX MSG1 */
--#define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
--#define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
--#define QDMA_ETH_RXMSG_IP4_MASK               BIT(29)
--#define QDMA_ETH_RXMSG_IP4F_MASK      BIT(28)
--#define QDMA_ETH_RXMSG_L4_VALID_MASK  BIT(27)
--#define QDMA_ETH_RXMSG_L4F_MASK               BIT(26)
--#define QDMA_ETH_RXMSG_SPORT_MASK     GENMASK(25, 21)
--#define QDMA_ETH_RXMSG_CRSN_MASK      GENMASK(20, 16)
--#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
--
--struct airoha_qdma_desc {
--      __le32 rsv;
--      __le32 ctrl;
--      __le32 addr;
--      __le32 data;
--      __le32 msg0;
--      __le32 msg1;
--      __le32 msg2;
--      __le32 msg3;
--};
--
--/* CTRL0 */
--#define QDMA_FWD_DESC_CTX_MASK                BIT(31)
--#define QDMA_FWD_DESC_RING_MASK               GENMASK(30, 28)
--#define QDMA_FWD_DESC_IDX_MASK                GENMASK(27, 16)
--#define QDMA_FWD_DESC_LEN_MASK                GENMASK(15, 0)
--/* CTRL1 */
--#define QDMA_FWD_DESC_FIRST_IDX_MASK  GENMASK(15, 0)
--/* CTRL2 */
--#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK       GENMASK(2, 0)
--
--struct airoha_qdma_fwd_desc {
--      __le32 addr;
--      __le32 ctrl0;
--      __le32 ctrl1;
--      __le32 ctrl2;
--      __le32 msg0;
--      __le32 msg1;
--      __le32 rsv0;
--      __le32 rsv1;
--};
--
--enum {
--      QDMA_INT_REG_IDX0,
--      QDMA_INT_REG_IDX1,
--      QDMA_INT_REG_IDX2,
--      QDMA_INT_REG_IDX3,
--      QDMA_INT_REG_IDX4,
--      QDMA_INT_REG_MAX
--};
--
--enum {
--      XSI_PCIE0_PORT,
--      XSI_PCIE1_PORT,
--      XSI_USB_PORT,
--      XSI_AE_PORT,
--      XSI_ETH_PORT,
--};
--
--enum {
--      XSI_PCIE0_VIP_PORT_MASK = BIT(22),
--      XSI_PCIE1_VIP_PORT_MASK = BIT(23),
--      XSI_USB_VIP_PORT_MASK   = BIT(25),
--      XSI_ETH_VIP_PORT_MASK   = BIT(24),
--};
--
--enum {
--      DEV_STATE_INITIALIZED,
--};
--
--enum {
--      CDM_CRSN_QSEL_Q1 = 1,
--      CDM_CRSN_QSEL_Q5 = 5,
--      CDM_CRSN_QSEL_Q6 = 6,
--      CDM_CRSN_QSEL_Q15 = 15,
--};
--
--enum {
--      CRSN_08 = 0x8,
--      CRSN_21 = 0x15, /* KA */
--      CRSN_22 = 0x16, /* hit bind and force route to CPU */
--      CRSN_24 = 0x18,
--      CRSN_25 = 0x19,
--};
--
--enum {
--      FE_PSE_PORT_CDM1,
--      FE_PSE_PORT_GDM1,
--      FE_PSE_PORT_GDM2,
--      FE_PSE_PORT_GDM3,
--      FE_PSE_PORT_PPE1,
--      FE_PSE_PORT_CDM2,
--      FE_PSE_PORT_CDM3,
--      FE_PSE_PORT_CDM4,
--      FE_PSE_PORT_PPE2,
--      FE_PSE_PORT_GDM4,
--      FE_PSE_PORT_CDM5,
--      FE_PSE_PORT_DROP = 0xf,
--};
--
--enum tx_sched_mode {
--      TC_SCH_WRR8,
--      TC_SCH_SP,
--      TC_SCH_WRR7,
--      TC_SCH_WRR6,
--      TC_SCH_WRR5,
--      TC_SCH_WRR4,
--      TC_SCH_WRR3,
--      TC_SCH_WRR2,
--};
--
--enum trtcm_param_type {
--      TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
--      TRTCM_TOKEN_RATE_MODE,
--      TRTCM_BUCKETSIZE_SHIFT_MODE,
--      TRTCM_BUCKET_COUNTER_MODE,
--};
--
--enum trtcm_mode_type {
--      TRTCM_COMMIT_MODE,
--      TRTCM_PEAK_MODE,
--};
--
--enum trtcm_param {
--      TRTCM_TICK_SEL = BIT(0),
--      TRTCM_PKT_MODE = BIT(1),
--      TRTCM_METER_MODE = BIT(2),
--};
--
--#define MIN_TOKEN_SIZE                                4096
--#define MAX_TOKEN_SIZE_OFFSET                 17
--#define TRTCM_TOKEN_RATE_MASK                 GENMASK(23, 6)
--#define TRTCM_TOKEN_RATE_FRACTION_MASK                GENMASK(5, 0)
--
--struct airoha_queue_entry {
--      union {
--              void *buf;
--              struct sk_buff *skb;
--      };
--      dma_addr_t dma_addr;
--      u16 dma_len;
--};
--
--struct airoha_queue {
--      struct airoha_qdma *qdma;
--
--      /* protect concurrent queue accesses */
--      spinlock_t lock;
--      struct airoha_queue_entry *entry;
--      struct airoha_qdma_desc *desc;
--      u16 head;
--      u16 tail;
--
--      int queued;
--      int ndesc;
--      int free_thr;
--      int buf_size;
--
--      struct napi_struct napi;
--      struct page_pool *page_pool;
--};
--
--struct airoha_tx_irq_queue {
--      struct airoha_qdma *qdma;
--
--      struct napi_struct napi;
--
--      int size;
--      u32 *q;
--};
--
--struct airoha_hw_stats {
--      /* protect concurrent hw_stats accesses */
--      spinlock_t lock;
--      struct u64_stats_sync syncp;
--
--      /* get_stats64 */
--      u64 rx_ok_pkts;
--      u64 tx_ok_pkts;
--      u64 rx_ok_bytes;
--      u64 tx_ok_bytes;
--      u64 rx_multicast;
--      u64 rx_errors;
--      u64 rx_drops;
--      u64 tx_drops;
--      u64 rx_crc_error;
--      u64 rx_over_errors;
--      /* ethtool stats */
--      u64 tx_broadcast;
--      u64 tx_multicast;
--      u64 tx_len[7];
--      u64 rx_broadcast;
--      u64 rx_fragment;
--      u64 rx_jabber;
--      u64 rx_len[7];
--};
--
--struct airoha_qdma {
--      struct airoha_eth *eth;
--      void __iomem *regs;
--
--      /* protect concurrent irqmask accesses */
--      spinlock_t irq_lock;
--      u32 irqmask[QDMA_INT_REG_MAX];
--      int irq;
--
--      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
--
--      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
--      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
--
--      /* descriptor and packet buffers for qdma hw forward */
--      struct {
--              void *desc;
--              void *q;
--      } hfwd;
--};
--
--struct airoha_gdm_port {
--      struct airoha_qdma *qdma;
--      struct net_device *dev;
--      int id;
--
--      struct airoha_hw_stats stats;
--
--      DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
--
--      /* qos stats counters */
--      u64 cpu_tx_packets;
--      u64 fwd_tx_packets;
--};
--
--struct airoha_eth {
--      struct device *dev;
--
--      unsigned long state;
--      void __iomem *fe_regs;
--
--      struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
--      struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
--
--      struct net_device *napi_dev;
--
--      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
--      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
--};
--
--static u32 airoha_rr(void __iomem *base, u32 offset)
--{
--      return readl(base + offset);
--}
--
--static void airoha_wr(void __iomem *base, u32 offset, u32 val)
--{
--      writel(val, base + offset);
--}
--
--static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
--{
--      val |= (airoha_rr(base, offset) & ~mask);
--      airoha_wr(base, offset, val);
--
--      return val;
--}
--
--#define airoha_fe_rr(eth, offset)                             \
--      airoha_rr((eth)->fe_regs, (offset))
--#define airoha_fe_wr(eth, offset, val)                                \
--      airoha_wr((eth)->fe_regs, (offset), (val))
--#define airoha_fe_rmw(eth, offset, mask, val)                 \
--      airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
--#define airoha_fe_set(eth, offset, val)                               \
--      airoha_rmw((eth)->fe_regs, (offset), 0, (val))
--#define airoha_fe_clear(eth, offset, val)                     \
--      airoha_rmw((eth)->fe_regs, (offset), (val), 0)
--
--#define airoha_qdma_rr(qdma, offset)                          \
--      airoha_rr((qdma)->regs, (offset))
--#define airoha_qdma_wr(qdma, offset, val)                     \
--      airoha_wr((qdma)->regs, (offset), (val))
--#define airoha_qdma_rmw(qdma, offset, mask, val)              \
--      airoha_rmw((qdma)->regs, (offset), (mask), (val))
--#define airoha_qdma_set(qdma, offset, val)                    \
--      airoha_rmw((qdma)->regs, (offset), 0, (val))
--#define airoha_qdma_clear(qdma, offset, val)                  \
--      airoha_rmw((qdma)->regs, (offset), (val), 0)
--
--static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
--                                  u32 clear, u32 set)
--{
--      unsigned long flags;
--
--      if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
--              return;
--
--      spin_lock_irqsave(&qdma->irq_lock, flags);
--
--      qdma->irqmask[index] &= ~clear;
--      qdma->irqmask[index] |= set;
--      airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
--      /* Read back the irq_enable register to guarantee that the update above
--       * has completed inside the spinlock critical section.
--       */
--      airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
--
--      spin_unlock_irqrestore(&qdma->irq_lock, flags);
--}
--
--static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
--                                 u32 mask)
--{
--      airoha_qdma_set_irqmask(qdma, index, 0, mask);
--}
--
--static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
--                                  u32 mask)
--{
--      airoha_qdma_set_irqmask(qdma, index, mask, 0);
--}
--
--static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
--{
--      /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
--       * GDM{2,3,4} can be used as WAN ports connected to an external
--       * PHY module.
--       */
--      return port->id == 1;
--}
--
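--/* The GDM MAC address is split across two registers: *_MAC_H holds the three
-- * high bytes, while LMIN/LMAX hold the three low bytes and are written with
-- * the same value, presumably so that a single address is matched.
-- */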
--static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
--{
--      struct airoha_eth *eth = port->qdma->eth;
--      u32 val, reg;
--
--      reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
--                                         : REG_FE_WAN_MAC_H;
--      val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
--      airoha_fe_wr(eth, reg, val);
--
--      val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
--      airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
--      airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
--}
--
--static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
--                                      u32 val)
--{
--      airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
--                    FIELD_PREP(GDM_OCFQ_MASK, val));
--      airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
--                    FIELD_PREP(GDM_MCFQ_MASK, val));
--      airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
--                    FIELD_PREP(GDM_BCFQ_MASK, val));
--      airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
--                    FIELD_PREP(GDM_UCFQ_MASK, val));
--}
--
--static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
--{
--      u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
--      u32 vip_port, cfg_addr;
--
--      switch (port) {
--      case XSI_PCIE0_PORT:
--              vip_port = XSI_PCIE0_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(3);
--              break;
--      case XSI_PCIE1_PORT:
--              vip_port = XSI_PCIE1_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(3);
--              break;
--      case XSI_USB_PORT:
--              vip_port = XSI_USB_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(4);
--              break;
--      case XSI_ETH_PORT:
--              vip_port = XSI_ETH_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(4);
--              break;
--      default:
--              return -EINVAL;
--      }
--
--      if (enable) {
--              airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
--              airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
--      } else {
--              airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
--              airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
--      }
--
--      airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
--
--      return 0;
--}
--
--static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
--{
--      const int port_list[] = {
--              XSI_PCIE0_PORT,
--              XSI_PCIE1_PORT,
--              XSI_USB_PORT,
--              XSI_ETH_PORT
--      };
--      int i, err;
--
--      for (i = 0; i < ARRAY_SIZE(port_list); i++) {
--              err = airoha_set_gdm_port(eth, port_list[i], enable);
--              if (err)
--                      goto error;
--      }
--
--      return 0;
--
--error:
--      for (i--; i >= 0; i--)
--              airoha_set_gdm_port(eth, port_list[i], false);
--
--      return err;
--}
--
--static void airoha_fe_maccr_init(struct airoha_eth *eth)
--{
--      int p;
--
--      for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
--              airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
--                            GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
--                            GDM_DROP_CRC_ERR);
--              airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
--                                          FE_PSE_PORT_CDM1);
--              airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
--                            GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
--                            FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
--                            FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
--      }
--
--      airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
--                    FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
--
--      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
--}
--
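--/* Program the FE VIP pattern table so control-plane traffic (PPPoE/PPP
-- * negotiation, BOOTP/DHCP, DHCPv6, ISAKMP, IEEE 1905.1 and LLDP) is
-- * trapped to the CPU.
-- */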
--static void airoha_fe_vip_setup(struct airoha_eth *eth)
--{
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(4),
--                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
--                   PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(6),
--                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
--                   PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(7),
--                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
--                   PATN_EN_MASK);
--
--      /* BOOTP (0x43) */
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(8),
--                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
--                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
--
--      /* BOOTP (0x44) */
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(9),
--                   PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
--                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
--
--      /* ISAKMP */
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(10),
--                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
--                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(11),
--                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
--                   PATN_EN_MASK);
--
--      /* DHCPv6 */
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(12),
--                   PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
--                   FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(19),
--                   PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
--                   PATN_EN_MASK);
--
--      /* ETH->ETH_P_1905 (0x893a) */
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(20),
--                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
--
--      airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
--      airoha_fe_wr(eth, REG_FE_VIP_EN(21),
--                   PATN_FCPU_EN_MASK | PATN_EN_MASK);
--}
--
--static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
--                                           u32 port, u32 queue)
--{
--      u32 val;
--
--      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
--                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
--                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
--                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
--      val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
--
--      return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
--}
--
--static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
--                                            u32 port, u32 queue, u32 val)
--{
--      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
--                    FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
--      airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
--                    PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
--                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
--                    FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
--                    FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
--                    PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
--}
--
--static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
--{
--      u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
--
--      return FIELD_GET(PSE_ALLRSV_MASK, val);
--}
--
--static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
--                                  u32 port, u32 queue, u32 val)
--{
--      u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
--      u32 tmp, all_rsv, fq_limit;
--
--      airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
--
--      /* modify all rsv */
--      all_rsv = airoha_fe_get_pse_all_rsv(eth);
--      all_rsv += (val - orig_val);
--      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
--                    FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
--
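--      /* Recompute the shared-buffer thresholds from the free-queue limit
--       * minus the total reserved pages: high = limit - rsv - 0x20,
--       * mid = limit - rsv - 0x100, low = 3/4 of the mid threshold
--       * (margins assumed to be vendor defaults).
--       */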
--      /* modify hthd */
--      tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
--      fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
--      tmp = fq_limit - all_rsv - 0x20;
--      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
--                    PSE_SHARE_USED_HTHD_MASK,
--                    FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
--
--      tmp = fq_limit - all_rsv - 0x100;
--      airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
--                    PSE_SHARE_USED_MTHD_MASK,
--                    FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
--      tmp = (3 * tmp) >> 2;
--      airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
--                    PSE_SHARE_USED_LTHD_MASK,
--                    FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
--
--      return 0;
--}
--
--static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
--{
--      const u32 pse_port_num_queues[] = {
--              [FE_PSE_PORT_CDM1] = 6,
--              [FE_PSE_PORT_GDM1] = 6,
--              [FE_PSE_PORT_GDM2] = 32,
--              [FE_PSE_PORT_GDM3] = 6,
--              [FE_PSE_PORT_PPE1] = 4,
--              [FE_PSE_PORT_CDM2] = 6,
--              [FE_PSE_PORT_CDM3] = 8,
--              [FE_PSE_PORT_CDM4] = 10,
--              [FE_PSE_PORT_PPE2] = 4,
--              [FE_PSE_PORT_GDM4] = 2,
--              [FE_PSE_PORT_CDM5] = 2,
--      };
--      u32 all_rsv;
--      int q;
--
--      all_rsv = airoha_fe_get_pse_all_rsv(eth);
--      /* hw misses PPE2 oq rsv */
--      all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
--      airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
--
--      /* CDM1 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* GDM1 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* GDM2 */
--      for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
--      /* GDM3 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* PPE1 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
--              if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
--                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
--                                               PSE_QUEUE_RSV_PAGES);
--              else
--                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
--      }
--      /* CDM2 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* CDM3 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
--      /* CDM4 */
--      for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* PPE2 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
--              if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
--                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
--                                               PSE_QUEUE_RSV_PAGES);
--              else
--                      airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
--      }
--      /* GDM4 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
--                                       PSE_QUEUE_RSV_PAGES);
--      /* CDM5 */
--      for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
--              airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
--                                       PSE_QUEUE_RSV_PAGES);
--}
--
--static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
--{
--      int i;
--
--      for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
--              int err, j;
--              u32 val;
--
--              airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
--
--              val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
--                    MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
--              airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
--              err = read_poll_timeout(airoha_fe_rr, val,
--                                      val & MC_VLAN_CFG_CMD_DONE_MASK,
--                                      USEC_PER_MSEC, 5 * USEC_PER_MSEC,
--                                      false, eth, REG_MC_VLAN_CFG);
--              if (err)
--                      return err;
--
--              for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
--                      airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
--
--                      val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
--                            FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
--                            MC_VLAN_CFG_RW_MASK;
--                      airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
--                      err = read_poll_timeout(airoha_fe_rr, val,
--                                              val & MC_VLAN_CFG_CMD_DONE_MASK,
--                                              USEC_PER_MSEC,
--                                              5 * USEC_PER_MSEC, false, eth,
--                                              REG_MC_VLAN_CFG);
--                      if (err)
--                              return err;
--              }
--      }
--
--      return 0;
--}
--
--static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
--{
--      /* CDM1_CRSN_QSEL */
--      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
--                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
--                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
--                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
--                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
--                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
--                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
--                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
--                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
--                               CDM_CRSN_QSEL_Q6));
--      airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
--                    CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
--                    FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
--                               CDM_CRSN_QSEL_Q1));
--      /* CDM2_CRSN_QSEL */
--      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
--                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
--                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
--                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
--                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
--                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
--                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
--                               CDM_CRSN_QSEL_Q1));
--      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
--                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
--                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
--                               CDM_CRSN_QSEL_Q6));
--      airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
--                    CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
--                    FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
--                               CDM_CRSN_QSEL_Q1));
--}
--
--static int airoha_fe_init(struct airoha_eth *eth)
--{
--      airoha_fe_maccr_init(eth);
--
--      /* PSE IQ reserve */
--      airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
--                    FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
--      airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
--                    PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
--                    FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
--                    FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
--
--      /* enable FE copy engine for MC/KA/DPI */
--      airoha_fe_wr(eth, REG_FE_PCE_CFG,
--                   PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
--      /* set vip queue selection to ring 1 */
--      airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
--                    FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
--      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
--                    FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
--      /* set GDM4 source interface offset to 8 */
--      airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
--                    GDM4_SPORT_OFF2_MASK |
--                    GDM4_SPORT_OFF1_MASK |
--                    GDM4_SPORT_OFF0_MASK,
--                    FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
--                    FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
--                    FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
--
--      /* set PSE Page as 128B */
--      airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
--                    FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
--                    FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
--                    FE_DMA_GLO_PG_SZ_MASK);
--      airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
--                   FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
--                   FE_RST_GDM4_MBI_ARB_MASK);
--      usleep_range(1000, 2000);
--
--      /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1;
--       * connect all other rings to PSE Port0 OQ-0
--       */
--      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
--      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
--      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
--      airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
--
--      airoha_fe_vip_setup(eth);
--      airoha_fe_pse_ports_init(eth);
--
--      airoha_fe_set(eth, REG_GDM_MISC_CFG,
--                    GDM2_RDM_ACK_WAIT_PREF_MASK |
--                    GDM2_CHN_VLD_MODE_MASK);
--      airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
--                    FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
--
--      /* init fragment and assemble Force Port */
--      /* NPU Core-3, NPU Bridge Channel-3 */
--      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
--                    IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
--                    FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
--                    FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
--      /* QDMA LAN, RX Ring-22 */
--      airoha_fe_rmw(eth, REG_IP_FRAG_FP,
--                    IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
--                    FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
--                    FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
--
--      airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
--      airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
--
--      airoha_fe_crsn_qsel_init(eth);
--
--      airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
--      airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
--
--      /* default aging mode for mbi unlock issue */
--      airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
--                    MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
--                    FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
--                    FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
--
--      /* disable IFC by default */
--      airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
--
--      /* enable 1:N vlan action, init vlan table */
--      airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
--
--      return airoha_fe_mc_vlan_clear(eth);
--}
--
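--/* Refill the RX ring with page-pool fragments and publish the new head to
-- * the hw through the per-ring CPU index register.
-- */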
--static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
--{
--      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
--      struct airoha_qdma *qdma = q->qdma;
--      struct airoha_eth *eth = qdma->eth;
--      int qid = q - &qdma->q_rx[0];
--      int nframes = 0;
--
--      while (q->queued < q->ndesc - 1) {
--              struct airoha_queue_entry *e = &q->entry[q->head];
--              struct airoha_qdma_desc *desc = &q->desc[q->head];
--              struct page *page;
--              int offset;
--              u32 val;
--
--              page = page_pool_dev_alloc_frag(q->page_pool, &offset,
--                                              q->buf_size);
--              if (!page)
--                      break;
--
--              q->head = (q->head + 1) % q->ndesc;
--              q->queued++;
--              nframes++;
--
--              e->buf = page_address(page) + offset;
--              e->dma_addr = page_pool_get_dma_addr(page) + offset;
--              e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
--
--              dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
--                                         dir);
--
--              val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
--              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
--              WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
--              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
--              WRITE_ONCE(desc->data, cpu_to_le32(val));
--              WRITE_ONCE(desc->msg0, 0);
--              WRITE_ONCE(desc->msg1, 0);
--              WRITE_ONCE(desc->msg2, 0);
--              WRITE_ONCE(desc->msg3, 0);
--
--              airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
--                              RX_RING_CPU_IDX_MASK,
--                              FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
--      }
--
--      return nframes;
--}
--
--static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
--                                  struct airoha_qdma_desc *desc)
--{
--      u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
--
--      sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
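--      /* Source ports 0x10-0x13 presumably map to the switch ports behind
--       * GDM1 (eth->ports[0]); 0x2-0x4 map directly to GDM2-GDM4.
--       */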
--      switch (sport) {
--      case 0x10 ... 0x13:
--              port = 0;
--              break;
--      case 0x2 ... 0x4:
--              port = sport - 1;
--              break;
--      default:
--              return -EINVAL;
--      }
--
--      return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
--}
--
--static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
--{
--      enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
--      struct airoha_qdma *qdma = q->qdma;
--      struct airoha_eth *eth = qdma->eth;
--      int qid = q - &qdma->q_rx[0];
--      int done = 0;
--
--      while (done < budget) {
--              struct airoha_queue_entry *e = &q->entry[q->tail];
--              struct airoha_qdma_desc *desc = &q->desc[q->tail];
--              dma_addr_t dma_addr = le32_to_cpu(desc->addr);
--              u32 desc_ctrl = le32_to_cpu(desc->ctrl);
--              struct sk_buff *skb;
--              int len, p;
--
--              if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
--                      break;
--
--              if (!dma_addr)
--                      break;
--
--              len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
--              if (!len)
--                      break;
--
--              q->tail = (q->tail + 1) % q->ndesc;
--              q->queued--;
--
--              dma_sync_single_for_cpu(eth->dev, dma_addr,
--                                      SKB_WITH_OVERHEAD(q->buf_size), dir);
--
--              p = airoha_qdma_get_gdm_port(eth, desc);
--              if (p < 0 || !eth->ports[p]) {
--                      page_pool_put_full_page(q->page_pool,
--                                              virt_to_head_page(e->buf),
--                                              true);
--                      continue;
--              }
--
--              skb = napi_build_skb(e->buf, q->buf_size);
--              if (!skb) {
--                      page_pool_put_full_page(q->page_pool,
--                                              virt_to_head_page(e->buf),
--                                              true);
--                      break;
--              }
--
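--              /* Skip the 2 byte headroom the hw prepends to the frame
--               * (likely paired with GLOBAL_CFG_RX_2B_OFFSET_MASK being set
--               * at init) so the IP header stays 4-byte aligned.
--               */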
--              skb_reserve(skb, 2);
--              __skb_put(skb, len);
--              skb_mark_for_recycle(skb);
--              skb->dev = eth->ports[p]->dev;
--              skb->protocol = eth_type_trans(skb, skb->dev);
--              skb->ip_summed = CHECKSUM_UNNECESSARY;
--              skb_record_rx_queue(skb, qid);
--              napi_gro_receive(&q->napi, skb);
--
--              done++;
--      }
--      airoha_qdma_fill_rx_queue(q);
--
--      return done;
--}
--
--static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
--{
--      struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
--      int cur, done = 0;
--
--      do {
--              cur = airoha_qdma_rx_process(q, budget - done);
--              done += cur;
--      } while (cur && done < budget);
--
--      if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
--                                     RX_DONE_INT_MASK);
--
--      return done;
--}
--
--static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
--                                   struct airoha_qdma *qdma, int ndesc)
--{
--      const struct page_pool_params pp_params = {
--              .order = 0,
--              .pool_size = 256,
--              .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
--                       PP_FLAG_PAGE_FRAG,
--              .dma_dir = DMA_FROM_DEVICE,
--              .max_len = PAGE_SIZE,
--              .nid = NUMA_NO_NODE,
--              .dev = qdma->eth->dev,
--              .napi = &q->napi,
--      };
--      struct airoha_eth *eth = qdma->eth;
--      int qid = q - &qdma->q_rx[0], thr;
--      dma_addr_t dma_addr;
--
--      q->buf_size = PAGE_SIZE / 2;
--      q->ndesc = ndesc;
--      q->qdma = qdma;
--
--      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
--                              GFP_KERNEL);
--      if (!q->entry)
--              return -ENOMEM;
--
--      q->page_pool = page_pool_create(&pp_params);
--      if (IS_ERR(q->page_pool)) {
--              int err = PTR_ERR(q->page_pool);
--
--              q->page_pool = NULL;
--              return err;
--      }
--
--      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
--                                    &dma_addr, GFP_KERNEL);
--      if (!q->desc)
--              return -ENOMEM;
--
--      netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
--
--      airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
--      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
--                      RX_RING_SIZE_MASK,
--                      FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
--
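--      /* RX ring threshold: 1/8 of the ring size, clamped to [1, 32] entries */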
--      thr = clamp(ndesc >> 3, 1, 32);
--      airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
--                      FIELD_PREP(RX_RING_THR_MASK, thr));
--      airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
--                      FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
--
--      airoha_qdma_fill_rx_queue(q);
--
--      return 0;
--}
--
--static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
--{
--      struct airoha_eth *eth = q->qdma->eth;
--
--      while (q->queued) {
--              struct airoha_queue_entry *e = &q->entry[q->tail];
--              struct page *page = virt_to_head_page(e->buf);
--
--              dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
--                                      page_pool_get_dma_dir(q->page_pool));
--              page_pool_put_full_page(q->page_pool, page, false);
--              q->tail = (q->tail + 1) % q->ndesc;
--              q->queued--;
--      }
--}
--
--static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
--{
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--              int err;
--
--              if (!(RX_DONE_INT_MASK & BIT(i))) {
--                      /* RX queue not bound to an IRQ */
--                      continue;
--              }
--
--              err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
--                                              RX_DSCP_NUM(i));
--              if (err)
--                      return err;
--      }
--
--      return 0;
--}
--
--static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
--{
--      struct airoha_tx_irq_queue *irq_q;
--      int id, done = 0, irq_queued;
--      struct airoha_qdma *qdma;
--      struct airoha_eth *eth;
--      u32 status, head;
--
--      irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
--      qdma = irq_q->qdma;
--      id = irq_q - &qdma->q_tx_irq[0];
--      eth = qdma->eth;
--
--      status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
--      head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
--      head = head % irq_q->size;
--      irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
--
--      while (irq_queued > 0 && done < budget) {
--              u32 qid, val = irq_q->q[head];
--              struct airoha_qdma_desc *desc;
--              struct airoha_queue_entry *e;
--              struct airoha_queue *q;
--              u32 index, desc_ctrl;
--              struct sk_buff *skb;
--
--              if (val == 0xff)
--                      break;
--
--              irq_q->q[head] = 0xff; /* mark as done */
--              head = (head + 1) % irq_q->size;
--              irq_queued--;
--              done++;
--
--              qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
--              if (qid >= ARRAY_SIZE(qdma->q_tx))
--                      continue;
--
--              q = &qdma->q_tx[qid];
--              if (!q->ndesc)
--                      continue;
--
--              index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
--              if (index >= q->ndesc)
--                      continue;
--
--              spin_lock_bh(&q->lock);
--
--              if (!q->queued)
--                      goto unlock;
--
--              desc = &q->desc[index];
--              desc_ctrl = le32_to_cpu(desc->ctrl);
--
--              if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
--                  !(desc_ctrl & QDMA_DESC_DROP_MASK))
--                      goto unlock;
--
--              e = &q->entry[index];
--              skb = e->skb;
--
--              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
--                               DMA_TO_DEVICE);
--              memset(e, 0, sizeof(*e));
--              WRITE_ONCE(desc->msg0, 0);
--              WRITE_ONCE(desc->msg1, 0);
--              q->queued--;
--
--              /* The completion ring can report out-of-order indexes if hw QoS
--               * is enabled and packets with different priorities are queued
--               * to the same DMA ring. Account for possible out-of-order
--               * reports when incrementing the DMA ring tail pointer.
--               */
--              while (q->tail != q->head && !q->entry[q->tail].dma_addr)
--                      q->tail = (q->tail + 1) % q->ndesc;
--
--              if (skb) {
--                      u16 queue = skb_get_queue_mapping(skb);
--                      struct netdev_queue *txq;
--
--                      txq = netdev_get_tx_queue(skb->dev, queue);
--                      netdev_tx_completed_queue(txq, 1, skb->len);
--                      if (netif_tx_queue_stopped(txq) &&
--                          q->ndesc - q->queued >= q->free_thr)
--                              netif_tx_wake_queue(txq);
--
--                      dev_kfree_skb_any(skb);
--              }
--unlock:
--              spin_unlock_bh(&q->lock);
--      }
--
--      if (done) {
--              int i, len = done >> 7;
--
--              for (i = 0; i < len; i++)
--                      airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
--                                      IRQ_CLEAR_LEN_MASK, 0x80);
--              airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
--                              IRQ_CLEAR_LEN_MASK, (done & 0x7f));
--      }
--
--      if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
--                                     TX_DONE_INT_MASK(id));
--
--      return done;
--}
--
--static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
--                                   struct airoha_qdma *qdma, int size)
--{
--      struct airoha_eth *eth = qdma->eth;
--      int i, qid = q - &qdma->q_tx[0];
--      dma_addr_t dma_addr;
--
--      spin_lock_init(&q->lock);
--      q->ndesc = size;
--      q->qdma = qdma;
--      q->free_thr = 1 + MAX_SKB_FRAGS;
--
--      q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
--                              GFP_KERNEL);
--      if (!q->entry)
--              return -ENOMEM;
--
--      q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
--                                    &dma_addr, GFP_KERNEL);
--      if (!q->desc)
--              return -ENOMEM;
--
--      for (i = 0; i < q->ndesc; i++) {
--              u32 val;
--
--              val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
--              WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
--      }
--
--      /* xmit ring drop default setting */
--      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
--                      TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
--
--      airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
--      airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
--                      FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
--      airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
--                      FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
--
--      return 0;
--}
--
--static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
--                                 struct airoha_qdma *qdma, int size)
--{
--      int id = irq_q - &qdma->q_tx_irq[0];
--      struct airoha_eth *eth = qdma->eth;
--      dma_addr_t dma_addr;
--
--      netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
--                        airoha_qdma_tx_napi_poll);
--      irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
--                                     &dma_addr, GFP_KERNEL);
--      if (!irq_q->q)
--              return -ENOMEM;
--
--      memset(irq_q->q, 0xff, size * sizeof(u32));
--      irq_q->size = size;
--      irq_q->qdma = qdma;
--
--      airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
--      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
--                      FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
--      airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
--                      FIELD_PREP(TX_IRQ_THR_MASK, 1));
--
--      return 0;
--}
--
--static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
--{
--      int i, err;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
--              err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
--                                            IRQ_QUEUE_LEN(i));
--              if (err)
--                      return err;
--      }
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
--                                              TX_DSCP_NUM);
--              if (err)
--                      return err;
--      }
--
--      return 0;
--}
--
--static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
--{
--      struct airoha_eth *eth = q->qdma->eth;
--
--      spin_lock_bh(&q->lock);
--      while (q->queued) {
--              struct airoha_queue_entry *e = &q->entry[q->tail];
--
--              dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
--                               DMA_TO_DEVICE);
--              dev_kfree_skb_any(e->skb);
--              e->skb = NULL;
--
--              q->tail = (q->tail + 1) % q->ndesc;
--              q->queued--;
--      }
--      spin_unlock_bh(&q->lock);
--}
--
--static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
--{
--      struct airoha_eth *eth = qdma->eth;
--      dma_addr_t dma_addr;
--      u32 status;
--      int size;
--
--      size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
--      qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                            GFP_KERNEL);
--      if (!qdma->hfwd.desc)
--              return -ENOMEM;
--
--      airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
--
--      size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
--      qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                         GFP_KERNEL);
--      if (!qdma->hfwd.q)
--              return -ENOMEM;
--
--      airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
--
--      airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
--                      HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
--                      FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
--      airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
--                      FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
--      airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
--                      LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
--                      HW_FWD_DESC_NUM_MASK,
--                      FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
--                      LMGR_INIT_START);
--
--      return read_poll_timeout(airoha_qdma_rr, status,
--                               !(status & LMGR_INIT_START), USEC_PER_MSEC,
--                               30 * USEC_PER_MSEC, true, qdma,
--                               REG_LMGR_INIT_CFG);
--}
--
--static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
--{
--      airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
--      airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
--
--      airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
--                        PSE_BUF_ESTIMATE_EN_MASK);
--
--      airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
--                      EGRESS_RATE_METER_EN_MASK |
--                      EGRESS_RATE_METER_EQ_RATE_EN_MASK);
--      /* 2047us x 31 = 63.457ms */
--      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
--                      EGRESS_RATE_METER_WINDOW_SZ_MASK,
--                      FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
--      airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
--                      EGRESS_RATE_METER_TIMESLICE_MASK,
--                      FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
--
--      /* ratelimit init */
--      airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
--      /* fast-tick 25us */
--      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
--                      FIELD_PREP(GLB_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
--                      FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
--
--      airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
--      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
--                      FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
--                      EGRESS_SLOW_TICK_RATIO_MASK,
--                      FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
--
--      airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
--      airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
--                        INGRESS_TRTCM_MODE_MASK);
--      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
--                      FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
--      airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
--                      INGRESS_SLOW_TICK_RATIO_MASK,
--                      FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
--
--      airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
--      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
--                      FIELD_PREP(SLA_FAST_TICK_MASK, 25));
--      airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
--                      FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
--}
--
--static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
--{
--      int i;
--
--      for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
--              /* Tx-cpu transferred count */
--              airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
--              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
--                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
--                             CNTR_ALL_DSCP_RING_EN_MASK |
--                             FIELD_PREP(CNTR_CHAN_MASK, i));
--              /* Tx-fwd transferred count */
--              airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
--              airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
--                             CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
--                             CNTR_ALL_DSCP_RING_EN_MASK |
--                             FIELD_PREP(CNTR_SRC_MASK, 1) |
--                             FIELD_PREP(CNTR_CHAN_MASK, i));
--      }
--}
--
--static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
--{
--      int i;
--
--      /* clear pending irqs */
--      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
--              airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
--
--      /* setup irqs */
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
--
--      /* setup irq binding */
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              if (!qdma->q_tx[i].ndesc)
--                      continue;
--
--              if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
--                      airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
--                                      TX_RING_IRQ_BLOCKING_CFG_MASK);
--              else
--                      airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
--                                        TX_RING_IRQ_BLOCKING_CFG_MASK);
--      }
--
--      airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
--                     GLOBAL_CFG_RX_2B_OFFSET_MASK |
--                     FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
--                     GLOBAL_CFG_CPU_TXR_RR_MASK |
--                     GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
--                     GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
--                     GLOBAL_CFG_MULTICAST_EN_MASK |
--                     GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
--                     GLOBAL_CFG_TX_WB_DONE_MASK |
--                     FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
--
--      airoha_qdma_init_qos(qdma);
--
--      /* disable qdma rx delay interrupt */
--      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--              if (!qdma->q_rx[i].ndesc)
--                      continue;
--
--              airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
--                                RX_DELAY_INT_MASK);
--      }
--
--      airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
--                      TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
--      airoha_qdma_init_qos_stats(qdma);
--
--      return 0;
--}
--
--static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
--{
--      struct airoha_qdma *qdma = dev_instance;
--      u32 intr[ARRAY_SIZE(qdma->irqmask)];
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
--              intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
--              intr[i] &= qdma->irqmask[i];
--              airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
--      }
--
--      if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
--              return IRQ_NONE;
--
--      if (intr[1] & RX_DONE_INT_MASK) {
--              airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
--                                      RX_DONE_INT_MASK);
--
--              for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--                      if (!qdma->q_rx[i].ndesc)
--                              continue;
--
--                      if (intr[1] & BIT(i))
--                              napi_schedule(&qdma->q_rx[i].napi);
--              }
--      }
--
--      if (intr[0] & INT_TX_MASK) {
--              for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
--                      if (!(intr[0] & TX_DONE_INT_MASK(i)))
--                              continue;
--
--                      airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
--                                              TX_DONE_INT_MASK(i));
--                      napi_schedule(&qdma->q_tx_irq[i].napi);
--              }
--      }
--
--      return IRQ_HANDLED;
--}
--
--static int airoha_qdma_init(struct platform_device *pdev,
--                          struct airoha_eth *eth,
--                          struct airoha_qdma *qdma)
--{
--      int err, id = qdma - &eth->qdma[0];
--      const char *res;
--
--      spin_lock_init(&qdma->irq_lock);
--      qdma->eth = eth;
--
--      res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
--      if (!res)
--              return -ENOMEM;
--
--      qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
--      if (IS_ERR(qdma->regs))
--              return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
--                                   "failed to iomap qdma%d regs\n", id);
--
--      qdma->irq = platform_get_irq(pdev, 4 * id);
--      if (qdma->irq < 0)
--              return qdma->irq;
--
--      err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
--                             IRQF_SHARED, KBUILD_MODNAME, qdma);
--      if (err)
--              return err;
--
--      err = airoha_qdma_init_rx(qdma);
--      if (err)
--              return err;
--
--      err = airoha_qdma_init_tx(qdma);
--      if (err)
--              return err;
--
--      err = airoha_qdma_init_hfwd_queues(qdma);
--      if (err)
--              return err;
--
--      return airoha_qdma_hw_init(qdma);
--}
--
--static int airoha_hw_init(struct platform_device *pdev,
--                        struct airoha_eth *eth)
--{
--      int err, i;
--
--      /* disable xsi */
--      err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
--                                      eth->xsi_rsts);
--      if (err)
--              return err;
--
--      err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
--      if (err)
--              return err;
--
--      msleep(20);
--      err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
--      if (err)
--              return err;
--
--      msleep(20);
--      err = airoha_fe_init(eth);
--      if (err)
--              return err;
--
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
--              err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
--              if (err)
--                      return err;
--      }
--
--      set_bit(DEV_STATE_INITIALIZED, &eth->state);
--
--      return 0;
--}
--
--static void airoha_hw_cleanup(struct airoha_qdma *qdma)
--{
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--              if (!qdma->q_rx[i].ndesc)
--                      continue;
--
--              netif_napi_del(&qdma->q_rx[i].napi);
--              airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
--              if (qdma->q_rx[i].page_pool)
--                      page_pool_destroy(qdma->q_rx[i].page_pool);
--      }
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
--              netif_napi_del(&qdma->q_tx_irq[i].napi);
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              if (!qdma->q_tx[i].ndesc)
--                      continue;
--
--              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
--      }
--}
--
--static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
--{
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
--              napi_enable(&qdma->q_tx_irq[i].napi);
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--              if (!qdma->q_rx[i].ndesc)
--                      continue;
--
--              napi_enable(&qdma->q_rx[i].napi);
--      }
--}
--
--static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
--{
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
--              napi_disable(&qdma->q_tx_irq[i].napi);
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--              if (!qdma->q_rx[i].ndesc)
--                      continue;
--
--              napi_disable(&qdma->q_rx[i].napi);
--      }
--}
--
--static void airoha_update_hw_stats(struct airoha_gdm_port *port)
--{
--      struct airoha_eth *eth = port->qdma->eth;
--      u32 val, i = 0;
--
--      spin_lock(&port->stats.lock);
--      u64_stats_update_begin(&port->stats.syncp);
--
--      /* TX */
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
--      port->stats.tx_ok_pkts += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
--      port->stats.tx_ok_pkts += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
--      port->stats.tx_ok_bytes += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
--      port->stats.tx_ok_bytes += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
--      port->stats.tx_drops += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
--      port->stats.tx_broadcast += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
--      port->stats.tx_multicast += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
--      port->stats.tx_len[i] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
--      port->stats.tx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
--      port->stats.tx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
--      port->stats.tx_len[i++] += val;
--
--      /* RX */
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
--      port->stats.rx_ok_pkts += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
--      port->stats.rx_ok_pkts += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
--      port->stats.rx_ok_bytes += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
--      port->stats.rx_ok_bytes += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
--      port->stats.rx_drops += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
--      port->stats.rx_broadcast += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
--      port->stats.rx_multicast += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
--      port->stats.rx_errors += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
--      port->stats.rx_crc_error += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
--      port->stats.rx_over_errors += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
--      port->stats.rx_fragment += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
--      port->stats.rx_jabber += val;
--
--      i = 0;
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
--      port->stats.rx_len[i] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
--      port->stats.rx_len[i] += ((u64)val << 32);
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
--      port->stats.rx_len[i++] += val;
--
--      val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
--      port->stats.rx_len[i++] += val;
--
--      /* reset mib counters */
--      airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
--                    FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
--
--      u64_stats_update_end(&port->stats.syncp);
--      spin_unlock(&port->stats.lock);
--}
--
--static int airoha_dev_open(struct net_device *dev)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_qdma *qdma = port->qdma;
--      int err;
--
--      netif_tx_start_all_queues(dev);
--      err = airoha_set_gdm_ports(qdma->eth, true);
--      if (err)
--              return err;
--
--      if (netdev_uses_dsa(dev))
--              airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
--                            GDM_STAG_EN_MASK);
--      else
--              airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
--                              GDM_STAG_EN_MASK);
--
--      airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
--                      GLOBAL_CFG_TX_DMA_EN_MASK |
--                      GLOBAL_CFG_RX_DMA_EN_MASK);
--
--      return 0;
--}
--
--static int airoha_dev_stop(struct net_device *dev)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_qdma *qdma = port->qdma;
--      int i, err;
--
--      netif_tx_disable(dev);
--      err = airoha_set_gdm_ports(qdma->eth, false);
--      if (err)
--              return err;
--
--      airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
--                        GLOBAL_CFG_TX_DMA_EN_MASK |
--                        GLOBAL_CFG_RX_DMA_EN_MASK);
--
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              if (!qdma->q_tx[i].ndesc)
--                      continue;
--
--              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
--              netdev_tx_reset_subqueue(dev, i);
--      }
--
--      return 0;
--}
--
--static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      int err;
--
--      err = eth_mac_addr(dev, p);
--      if (err)
--              return err;
--
--      airoha_set_macaddr(port, dev->dev_addr);
--
--      return 0;
--}
--
--static int airoha_dev_init(struct net_device *dev)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--
--      airoha_set_macaddr(port, dev->dev_addr);
--
--      return 0;
--}
--
--static void airoha_dev_get_stats64(struct net_device *dev,
--                                 struct rtnl_link_stats64 *storage)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      unsigned int start;
--
--      airoha_update_hw_stats(port);
--      do {
--              start = u64_stats_fetch_begin(&port->stats.syncp);
--              storage->rx_packets = port->stats.rx_ok_pkts;
--              storage->tx_packets = port->stats.tx_ok_pkts;
--              storage->rx_bytes = port->stats.rx_ok_bytes;
--              storage->tx_bytes = port->stats.tx_ok_bytes;
--              storage->multicast = port->stats.rx_multicast;
--              storage->rx_errors = port->stats.rx_errors;
--              storage->rx_dropped = port->stats.rx_drops;
--              storage->tx_dropped = port->stats.tx_drops;
--              storage->rx_crc_errors = port->stats.rx_crc_error;
--              storage->rx_over_errors = port->stats.rx_over_errors;
--      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
--}
--
--static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
--                                 struct net_device *sb_dev)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      int queue, channel;
--
--      /* For dsa device select QoS channel according to the dsa user port
--       * index, rely on port id otherwise. Select QoS queue based on the
--       * skb priority.
--       */
--      channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
--      channel = channel % AIROHA_NUM_QOS_CHANNELS;
--      queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
--      queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
--
--      return queue < dev->num_tx_queues ? queue : 0;
--}
--
--static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
--                                 struct net_device *dev)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
--      u32 msg0, msg1, len = skb_headlen(skb);
--      struct airoha_qdma *qdma = port->qdma;
--      struct netdev_queue *txq;
--      struct airoha_queue *q;
--      void *data = skb->data;
--      int i, qid;
--      u16 index;
--      u8 fport;
--
--      qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
--      msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
--                        qid / AIROHA_NUM_QOS_QUEUES) |
--             FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
--                        qid % AIROHA_NUM_QOS_QUEUES);
--      if (skb->ip_summed == CHECKSUM_PARTIAL)
--              msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
--                      FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
--                      FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
--
--      /* TSO: fill MSS info in tcp checksum field */
--      if (skb_is_gso(skb)) {
--              if (skb_cow_head(skb, 0))
--                      goto error;
--
--              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
--                                               SKB_GSO_TCPV6)) {
--                      __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
--
--                      tcp_hdr(skb)->check = (__force __sum16)csum;
--                      msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
--              }
--      }
--
--      fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
--      msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
--             FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
--
--      q = &qdma->q_tx[qid];
--      if (WARN_ON_ONCE(!q->ndesc))
--              goto error;
--
--      spin_lock_bh(&q->lock);
--
--      txq = netdev_get_tx_queue(dev, qid);
--      if (q->queued + nr_frags > q->ndesc) {
--              /* not enough space in the queue */
--              netif_tx_stop_queue(txq);
--              spin_unlock_bh(&q->lock);
--              return NETDEV_TX_BUSY;
--      }
--
--      index = q->head;
--      for (i = 0; i < nr_frags; i++) {
--              struct airoha_qdma_desc *desc = &q->desc[index];
--              struct airoha_queue_entry *e = &q->entry[index];
--              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
--              dma_addr_t addr;
--              u32 val;
--
--              addr = dma_map_single(dev->dev.parent, data, len,
--                                    DMA_TO_DEVICE);
--              if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
--                      goto error_unmap;
--
--              index = (index + 1) % q->ndesc;
--
--              val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
--              if (i < nr_frags - 1)
--                      val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
--              WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
--              WRITE_ONCE(desc->addr, cpu_to_le32(addr));
--              val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
--              WRITE_ONCE(desc->data, cpu_to_le32(val));
--              WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
--              WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
--              WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
--
--              e->skb = i ? NULL : skb;
--              e->dma_addr = addr;
--              e->dma_len = len;
--
--              data = skb_frag_address(frag);
--              len = skb_frag_size(frag);
--      }
--
--      q->head = index;
--      q->queued += i;
--
--      skb_tx_timestamp(skb);
--      netdev_tx_sent_queue(txq, skb->len);
--
--      if (netif_xmit_stopped(txq) || !netdev_xmit_more())
--              airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
--                              TX_RING_CPU_IDX_MASK,
--                              FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
--
--      if (q->ndesc - q->queued < q->free_thr)
--              netif_tx_stop_queue(txq);
--
--      spin_unlock_bh(&q->lock);
--
--      return NETDEV_TX_OK;
--
--error_unmap:
--      for (i--; i >= 0; i--) {
--              index = (q->head + i) % q->ndesc;
--              dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
--                               q->entry[index].dma_len, DMA_TO_DEVICE);
--      }
--
--      spin_unlock_bh(&q->lock);
--error:
--      dev_kfree_skb_any(skb);
--      dev->stats.tx_dropped++;
--
--      return NETDEV_TX_OK;
--}
--
--static void airoha_ethtool_get_drvinfo(struct net_device *dev,
--                                     struct ethtool_drvinfo *info)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_eth *eth = port->qdma->eth;
--
--      strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
--      strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
--}
--
--static void airoha_ethtool_get_mac_stats(struct net_device *dev,
--                                       struct ethtool_eth_mac_stats *stats)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      unsigned int start;
--
--      airoha_update_hw_stats(port);
--      do {
--              start = u64_stats_fetch_begin(&port->stats.syncp);
--              stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
--              stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
--              stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
--      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
--}
--
--static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
--      {    0,    64 },
--      {   65,   127 },
--      {  128,   255 },
--      {  256,   511 },
--      {  512,  1023 },
--      { 1024,  1518 },
--      { 1519, 10239 },
--      {},
--};
--
--static void
--airoha_ethtool_get_rmon_stats(struct net_device *dev,
--                            struct ethtool_rmon_stats *stats,
--                            const struct ethtool_rmon_hist_range **ranges)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_hw_stats *hw_stats = &port->stats;
--      unsigned int start;
--
--      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
--                   ARRAY_SIZE(hw_stats->tx_len) + 1);
--      BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
--                   ARRAY_SIZE(hw_stats->rx_len) + 1);
--
--      *ranges = airoha_ethtool_rmon_ranges;
--      airoha_update_hw_stats(port);
--      do {
--              int i;
--
--              start = u64_stats_fetch_begin(&port->stats.syncp);
--              stats->fragments = hw_stats->rx_fragment;
--              stats->jabbers = hw_stats->rx_jabber;
--              for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
--                   i++) {
--                      stats->hist[i] = hw_stats->rx_len[i];
--                      stats->hist_tx[i] = hw_stats->tx_len[i];
--              }
--      } while (u64_stats_fetch_retry(&port->stats.syncp, start));
--}
--
--static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
--                                       int channel, enum tx_sched_mode mode,
--                                       const u16 *weights, u8 n_weights)
--{
--      int i;
--
--      for (i = 0; i < AIROHA_NUM_TX_RING; i++)
--              airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
--                                TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
--
--      for (i = 0; i < n_weights; i++) {
--              u32 status;
--              int err;
--
--              airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
--                             TWRR_RW_CMD_MASK |
--                             FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
--                             FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
--                             FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
--              err = read_poll_timeout(airoha_qdma_rr, status,
--                                      status & TWRR_RW_CMD_DONE,
--                                      USEC_PER_MSEC, 10 * USEC_PER_MSEC,
--                                      true, port->qdma,
--                                      REG_TXWRR_WEIGHT_CFG);
--              if (err)
--                      return err;
--      }
--
--      airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
--                      CHAN_QOS_MODE_MASK(channel),
--                      mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
--
--      return 0;
--}
--
--static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
--                                       int channel)
--{
--      static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
--
--      return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
--                                           ARRAY_SIZE(w));
--}
--
--static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
--                                      int channel,
--                                      struct tc_ets_qopt_offload *opt)
--{
--      struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
--      enum tx_sched_mode mode = TC_SCH_SP;
--      u16 w[AIROHA_NUM_QOS_QUEUES] = {};
--      int i, nstrict = 0, nwrr, qidx;
--
--      if (p->bands > AIROHA_NUM_QOS_QUEUES)
--              return -EINVAL;
--
--      for (i = 0; i < p->bands; i++) {
--              if (!p->quanta[i])
--                      nstrict++;
--      }
--
--      /* this configuration is not supported by the hw */
--      if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
--              return -EINVAL;
--
--      /* EN7581 SoC supports fixed QoS band priority where WRR queues have
--       * lowest priorities with respect to SP ones.
--       * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
--       */
--      nwrr = p->bands - nstrict;
--      qidx = nstrict && nwrr ? nstrict : 0;
--      for (i = 1; i <= p->bands; i++) {
--              if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
--                      return -EINVAL;
--
--              qidx = i == nwrr ? 0 : qidx + 1;
--      }
--
--      for (i = 0; i < nwrr; i++)
--              w[i] = p->weights[nstrict + i];
--
--      if (!nstrict)
--              mode = TC_SCH_WRR8;
--      else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
--              mode = nstrict + 1;
--
--      return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
--                                           ARRAY_SIZE(w));
--}
--
--static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
--                                      int channel,
--                                      struct tc_ets_qopt_offload *opt)
--{
--      u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
--                                          REG_CNTR_VAL(channel << 1));
--      u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
--                                          REG_CNTR_VAL((channel << 1) + 1));
--      u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
--                       (fwd_tx_packets - port->fwd_tx_packets);
--      _bstats_update(opt->stats.bstats, 0, tx_packets);
--
--      port->cpu_tx_packets = cpu_tx_packets;
--      port->fwd_tx_packets = fwd_tx_packets;
--
--      return 0;
--}
--
--static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
--                                   struct tc_ets_qopt_offload *opt)
--{
--      int channel = TC_H_MAJ(opt->handle) >> 16;
--
--      if (opt->parent == TC_H_ROOT)
--              return -EINVAL;
--
--      switch (opt->command) {
--      case TC_ETS_REPLACE:
--              return airoha_qdma_set_tx_ets_sched(port, channel, opt);
--      case TC_ETS_DESTROY:
--              /* PRIO is default qdisc scheduler */
--              return airoha_qdma_set_tx_prio_sched(port, channel);
--      case TC_ETS_STATS:
--              return airoha_qdma_get_tx_ets_stats(port, channel, opt);
--      default:
--              return -EOPNOTSUPP;
--      }
--}
--
--static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
--                                     u32 addr, enum trtcm_param_type param,
--                                     enum trtcm_mode_type mode,
--                                     u32 *val_low, u32 *val_high)
--{
--      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
--      u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
--                        FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
--                        FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
--                        FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
--
--      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
--      if (read_poll_timeout(airoha_qdma_rr, val,
--                            val & TRTCM_PARAM_RW_DONE_MASK,
--                            USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
--                            qdma, REG_TRTCM_CFG_PARAM(addr)))
--              return -ETIMEDOUT;
--
--      *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
--      if (val_high)
--              *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
--
--      return 0;
--}
--
--static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
--                                     u32 addr, enum trtcm_param_type param,
--                                     enum trtcm_mode_type mode, u32 val)
--{
--      u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
--      u32 config = TRTCM_PARAM_RW_MASK |
--                   FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
--                   FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
--                   FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
--                   FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
--
--      airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
--      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
--
--      return read_poll_timeout(airoha_qdma_rr, val,
--                               val & TRTCM_PARAM_RW_DONE_MASK,
--                               USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
--                               qdma, REG_TRTCM_CFG_PARAM(addr));
--}
--
--static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
--                                      u32 addr, enum trtcm_mode_type mode,
--                                      bool enable, u32 enable_mask)
--{
--      u32 val;
--
--      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
--                                      mode, &val, NULL))
--              return -EINVAL;
--
--      val = enable ? val | enable_mask : val & ~enable_mask;
--
--      return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
--                                         mode, val);
--}
--
--static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
--                                            int channel, u32 addr,
--                                            enum trtcm_mode_type mode,
--                                            u32 rate_val, u32 bucket_size)
--{
--      u32 val, config, tick, unit, rate, rate_frac;
--      int err;
--
--      if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
--                                      mode, &config, NULL))
--              return -EINVAL;
--
--      val = airoha_qdma_rr(qdma, addr);
--      tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
--      if (config & TRTCM_TICK_SEL)
--              tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
--      if (!tick)
--              return -EINVAL;
--
--      unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
--      if (!unit)
--              return -EINVAL;
--
--      rate = rate_val / unit;
--      rate_frac = rate_val % unit;
--      rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
--      rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
--             FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
--
--      err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
--                                        TRTCM_TOKEN_RATE_MODE, mode, rate);
--      if (err)
--              return err;
--
--      val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
--      val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
--
--      return airoha_qdma_set_trtcm_param(qdma, channel, addr,
--                                         TRTCM_BUCKETSIZE_SHIFT_MODE,
--                                         mode, val);
--}
--
--static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
--                                       int channel, u32 rate,
--                                       u32 bucket_size)
--{
--      int i, err;
--
--      for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
--              err = airoha_qdma_set_trtcm_config(port->qdma, channel,
--                                                 REG_EGRESS_TRTCM_CFG, i,
--                                                 !!rate, TRTCM_METER_MODE);
--              if (err)
--                      return err;
--
--              err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
--                                                       REG_EGRESS_TRTCM_CFG,
--                                                       i, rate, bucket_size);
--              if (err)
--                      return err;
--      }
--
--      return 0;
--}
--
--static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
--                                        struct tc_htb_qopt_offload *opt)
--{
--      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
--      u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
--      struct net_device *dev = port->dev;
--      int num_tx_queues = dev->real_num_tx_queues;
--      int err;
--
--      if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
--              NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
--              return -EINVAL;
--      }
--
--      err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
--      if (err) {
--              NL_SET_ERR_MSG_MOD(opt->extack,
--                                 "failed configuring htb offload");
--              return err;
--      }
--
--      if (opt->command == TC_HTB_NODE_MODIFY)
--              return 0;
--
--      err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
--      if (err) {
--              airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
--              NL_SET_ERR_MSG_MOD(opt->extack,
--                                 "failed setting real_num_tx_queues");
--              return err;
--      }
--
--      set_bit(channel, port->qos_sq_bmap);
--      opt->qid = AIROHA_NUM_TX_RING + channel;
--
--      return 0;
--}
--
--static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
--{
--      struct net_device *dev = port->dev;
--
--      netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
--      airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
--      clear_bit(queue, port->qos_sq_bmap);
--}
--
--static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
--                                         struct tc_htb_qopt_offload *opt)
--{
--      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
--
--      if (!test_bit(channel, port->qos_sq_bmap)) {
--              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
--              return -EINVAL;
--      }
--
--      airoha_tc_remove_htb_queue(port, channel);
--
--      return 0;
--}
--
--static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
--{
--      int q;
--
--      for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
--              airoha_tc_remove_htb_queue(port, q);
--
--      return 0;
--}
--
--static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
--                                          struct tc_htb_qopt_offload *opt)
--{
--      u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
--
--      if (!test_bit(channel, port->qos_sq_bmap)) {
--              NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
--              return -EINVAL;
--      }
--
--      opt->qid = channel;
--
--      return 0;
--}
--
--static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
--                                   struct tc_htb_qopt_offload *opt)
--{
--      switch (opt->command) {
--      case TC_HTB_CREATE:
--              break;
--      case TC_HTB_DESTROY:
--              return airoha_tc_htb_destroy(port);
--      case TC_HTB_NODE_MODIFY:
--      case TC_HTB_LEAF_ALLOC_QUEUE:
--              return airoha_tc_htb_alloc_leaf_queue(port, opt);
--      case TC_HTB_LEAF_DEL:
--      case TC_HTB_LEAF_DEL_LAST:
--      case TC_HTB_LEAF_DEL_LAST_FORCE:
--              return airoha_tc_htb_delete_leaf_queue(port, opt);
--      case TC_HTB_LEAF_QUERY_QUEUE:
--              return airoha_tc_get_htb_get_leaf_queue(port, opt);
--      default:
--              return -EOPNOTSUPP;
--      }
--
--      return 0;
--}
--
--static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
--                             void *type_data)
--{
--      struct airoha_gdm_port *port = netdev_priv(dev);
--
--      switch (type) {
--      case TC_SETUP_QDISC_ETS:
--              return airoha_tc_setup_qdisc_ets(port, type_data);
--      case TC_SETUP_QDISC_HTB:
--              return airoha_tc_setup_qdisc_htb(port, type_data);
--      default:
--              return -EOPNOTSUPP;
--      }
--}
--
--static const struct net_device_ops airoha_netdev_ops = {
--      .ndo_init               = airoha_dev_init,
--      .ndo_open               = airoha_dev_open,
--      .ndo_stop               = airoha_dev_stop,
--      .ndo_select_queue       = airoha_dev_select_queue,
--      .ndo_start_xmit         = airoha_dev_xmit,
--      .ndo_get_stats64        = airoha_dev_get_stats64,
--      .ndo_set_mac_address    = airoha_dev_set_macaddr,
--      .ndo_setup_tc           = airoha_dev_tc_setup,
--};
--
--static const struct ethtool_ops airoha_ethtool_ops = {
--      .get_drvinfo            = airoha_ethtool_get_drvinfo,
--      .get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
--      .get_rmon_stats         = airoha_ethtool_get_rmon_stats,
--};
--
--static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
--{
--      const __be32 *id_ptr = of_get_property(np, "reg", NULL);
--      struct airoha_gdm_port *port;
--      struct airoha_qdma *qdma;
--      struct net_device *dev;
--      int err, index;
--      u32 id;
--
--      if (!id_ptr) {
--              dev_err(eth->dev, "missing gdm port id\n");
--              return -EINVAL;
--      }
--
--      id = be32_to_cpup(id_ptr);
--      index = id - 1;
--
--      if (!id || id > ARRAY_SIZE(eth->ports)) {
--              dev_err(eth->dev, "invalid gdm port id: %d\n", id);
--              return -EINVAL;
--      }
--
--      if (eth->ports[index]) {
--              dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
--              return -EINVAL;
--      }
--
--      dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
--                                    AIROHA_NUM_NETDEV_TX_RINGS,
--                                    AIROHA_NUM_RX_RING);
--      if (!dev) {
--              dev_err(eth->dev, "alloc_etherdev failed\n");
--              return -ENOMEM;
--      }
--
--      qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
--      dev->netdev_ops = &airoha_netdev_ops;
--      dev->ethtool_ops = &airoha_ethtool_ops;
--      dev->max_mtu = AIROHA_MAX_MTU;
--      dev->watchdog_timeo = 5 * HZ;
--      dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
--                         NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
--                         NETIF_F_SG | NETIF_F_TSO |
--                         NETIF_F_HW_TC;
--      dev->features |= dev->hw_features;
--      dev->dev.of_node = np;
--      dev->irq = qdma->irq;
--      SET_NETDEV_DEV(dev, eth->dev);
--
--      /* reserve hw queues for HTB offloading */
--      err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
--      if (err)
--              return err;
--
--      err = of_get_ethdev_address(np, dev);
--      if (err) {
--              if (err == -EPROBE_DEFER)
--                      return err;
--
--              eth_hw_addr_random(dev);
--              dev_info(eth->dev, "generated random MAC address %pM\n",
--                       dev->dev_addr);
--      }
--
--      port = netdev_priv(dev);
--      u64_stats_init(&port->stats.syncp);
--      spin_lock_init(&port->stats.lock);
--      port->qdma = qdma;
--      port->dev = dev;
--      port->id = id;
--      eth->ports[index] = port;
--
--      return register_netdev(dev);
--}
--
--static int airoha_probe(struct platform_device *pdev)
--{
--      struct device_node *np;
--      struct airoha_eth *eth;
--      int i, err;
--
--      eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
--      if (!eth)
--              return -ENOMEM;
--
--      eth->dev = &pdev->dev;
--
--      err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
--      if (err) {
--              dev_err(eth->dev, "failed configuring DMA mask\n");
--              return err;
--      }
--
--      eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
--      if (IS_ERR(eth->fe_regs))
--              return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
--                                   "failed to iomap fe regs\n");
--
--      eth->rsts[0].id = "fe";
--      eth->rsts[1].id = "pdma";
--      eth->rsts[2].id = "qdma";
--      err = devm_reset_control_bulk_get_exclusive(eth->dev,
--                                                  ARRAY_SIZE(eth->rsts),
--                                                  eth->rsts);
--      if (err) {
--              dev_err(eth->dev, "failed to get bulk reset lines\n");
--              return err;
--      }
--
--      eth->xsi_rsts[0].id = "xsi-mac";
--      eth->xsi_rsts[1].id = "hsi0-mac";
--      eth->xsi_rsts[2].id = "hsi1-mac";
--      eth->xsi_rsts[3].id = "hsi-mac";
--      eth->xsi_rsts[4].id = "xfp-mac";
--      err = devm_reset_control_bulk_get_exclusive(eth->dev,
--                                                  ARRAY_SIZE(eth->xsi_rsts),
--                                                  eth->xsi_rsts);
--      if (err) {
--              dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
--              return err;
--      }
--
--      eth->napi_dev = alloc_netdev_dummy(0);
--      if (!eth->napi_dev)
--              return -ENOMEM;
--
--      /* Enable threaded NAPI by default */
--      eth->napi_dev->threaded = true;
--      strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
--      platform_set_drvdata(pdev, eth);
--
--      err = airoha_hw_init(pdev, eth);
--      if (err)
--              goto error_hw_cleanup;
--
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
--              airoha_qdma_start_napi(&eth->qdma[i]);
--
--      for_each_child_of_node(pdev->dev.of_node, np) {
--              if (!of_device_is_compatible(np, "airoha,eth-mac"))
--                      continue;
--
--              if (!of_device_is_available(np))
--                      continue;
--
--              err = airoha_alloc_gdm_port(eth, np);
--              if (err) {
--                      of_node_put(np);
--                      goto error_napi_stop;
--              }
--      }
--
--      return 0;
--
--error_napi_stop:
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
--              airoha_qdma_stop_napi(&eth->qdma[i]);
--error_hw_cleanup:
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
--              airoha_hw_cleanup(&eth->qdma[i]);
--
--      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
--              struct airoha_gdm_port *port = eth->ports[i];
--
--              if (port && port->dev->reg_state == NETREG_REGISTERED)
--                      unregister_netdev(port->dev);
--      }
--      free_netdev(eth->napi_dev);
--      platform_set_drvdata(pdev, NULL);
--
--      return err;
--}
--
--static void airoha_remove(struct platform_device *pdev)
--{
--      struct airoha_eth *eth = platform_get_drvdata(pdev);
--      int i;
--
--      for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
--              airoha_qdma_stop_napi(&eth->qdma[i]);
--              airoha_hw_cleanup(&eth->qdma[i]);
--      }
--
--      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
--              struct airoha_gdm_port *port = eth->ports[i];
--
--              if (!port)
--                      continue;
--
--              airoha_dev_stop(port->dev);
--              unregister_netdev(port->dev);
--      }
--      free_netdev(eth->napi_dev);
--
--      platform_set_drvdata(pdev, NULL);
--}
--
--static const struct of_device_id of_airoha_match[] = {
--      { .compatible = "airoha,en7581-eth" },
--      { /* sentinel */ }
--};
--MODULE_DEVICE_TABLE(of, of_airoha_match);
--
--static struct platform_driver airoha_driver = {
--      .probe = airoha_probe,
--      .remove_new = airoha_remove,
--      .driver = {
--              .name = KBUILD_MODNAME,
--              .of_match_table = of_airoha_match,
--      },
--};
--module_platform_driver(airoha_driver);
--
--MODULE_LICENSE("GPL");
--MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
--MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
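The airoha_probe() error path removed above follows the usual goto-unwind idiom: each failure jumps to a label that releases only what has already been set up (stop the NAPIs, clean up the hardware, unregister any registered ports, free the dummy netdev), in reverse order of initialisation. A minimal sketch of that idiom, assuming placeholder sketch_*() helpers that stand in for the airoha_* calls and are not part of the driver:

/* Sketch of the goto-based unwind used by airoha_probe() above.
 * sketch_hw_init()/sketch_hw_cleanup()/sketch_add_ports() are illustrative
 * stand-ins, not functions from the driver.
 */
#include <linux/platform_device.h>

static int sketch_hw_init(struct platform_device *pdev) { return 0; }
static void sketch_hw_cleanup(struct platform_device *pdev) { }
static int sketch_add_ports(struct platform_device *pdev) { return 0; }

static int sketch_probe(struct platform_device *pdev)
{
	int err;

	err = sketch_hw_init(pdev);
	if (err)
		return err;		/* nothing to unwind yet */

	err = sketch_add_ports(pdev);
	if (err)
		goto error_hw_cleanup;	/* undo only what already succeeded */

	return 0;

error_hw_cleanup:
	sketch_hw_cleanup(pdev);
	return err;
}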
diff --git a/target/linux/airoha/patches-6.6/048-02-v6.15-net-airoha-Move-definitions-in-airoha_eth.h.patch b/target/linux/airoha/patches-6.6/048-02-v6.15-net-airoha-Move-definitions-in-airoha_eth.h.patch
deleted file mode 100644 (file)
index 8539128..0000000
+++ /dev/null
@@ -1,538 +0,0 @@
-From b38f4ff0ceacd6ce8d333a8dc90f405a040968d3 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:10 +0100
-Subject: [PATCH 02/15] net: airoha: Move definitions in airoha_eth.h
-
-Move common airoha_eth definitions in airoha_eth.h in order to reuse
-them for Packet Processor Engine (PPE) codebase.
-PPE module is used to enable support for flowtable hw offloading in
-airoha_eth driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 240 +---------------------
- drivers/net/ethernet/airoha/airoha_eth.h | 251 +++++++++++++++++++++++
- 2 files changed, 252 insertions(+), 239 deletions(-)
- create mode 100644 drivers/net/ethernet/airoha/airoha_eth.h
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -3,14 +3,9 @@
-  * Copyright (c) 2024 AIROHA Inc
-  * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-  */
--#include <linux/etherdevice.h>
--#include <linux/iopoll.h>
--#include <linux/kernel.h>
--#include <linux/netdevice.h>
- #include <linux/of.h>
- #include <linux/of_net.h>
- #include <linux/platform_device.h>
--#include <linux/reset.h>
- #include <linux/tcp.h>
- #include <linux/u64_stats_sync.h>
- #include <net/dsa.h>
-@@ -18,35 +13,7 @@
- #include <net/pkt_cls.h>
- #include <uapi/linux/ppp_defs.h>
--#define AIROHA_MAX_NUM_GDM_PORTS      1
--#define AIROHA_MAX_NUM_QDMA           2
--#define AIROHA_MAX_NUM_RSTS           3
--#define AIROHA_MAX_NUM_XSI_RSTS               5
--#define AIROHA_MAX_MTU                        2000
--#define AIROHA_MAX_PACKET_SIZE                2048
--#define AIROHA_NUM_QOS_CHANNELS               4
--#define AIROHA_NUM_QOS_QUEUES         8
--#define AIROHA_NUM_TX_RING            32
--#define AIROHA_NUM_RX_RING            32
--#define AIROHA_NUM_NETDEV_TX_RINGS    (AIROHA_NUM_TX_RING + \
--                                       AIROHA_NUM_QOS_CHANNELS)
--#define AIROHA_FE_MC_MAX_VLAN_TABLE   64
--#define AIROHA_FE_MC_MAX_VLAN_PORT    16
--#define AIROHA_NUM_TX_IRQ             2
--#define HW_DSCP_NUM                   2048
--#define IRQ_QUEUE_LEN(_n)             ((_n) ? 1024 : 2048)
--#define TX_DSCP_NUM                   1024
--#define RX_DSCP_NUM(_n)                       \
--      ((_n) ==  2 ? 128 :             \
--       (_n) == 11 ? 128 :             \
--       (_n) == 15 ? 128 :             \
--       (_n) ==  0 ? 1024 : 16)
--
--#define PSE_RSV_PAGES                 128
--#define PSE_QUEUE_RSV_PAGES           64
--
--#define QDMA_METER_IDX(_n)            ((_n) & 0xff)
--#define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+#include "airoha_eth.h"
- /* FE */
- #define PSE_BASE                      0x0100
-@@ -706,211 +673,6 @@ struct airoha_qdma_fwd_desc {
-       __le32 rsv1;
- };
--enum {
--      QDMA_INT_REG_IDX0,
--      QDMA_INT_REG_IDX1,
--      QDMA_INT_REG_IDX2,
--      QDMA_INT_REG_IDX3,
--      QDMA_INT_REG_IDX4,
--      QDMA_INT_REG_MAX
--};
--
--enum {
--      XSI_PCIE0_PORT,
--      XSI_PCIE1_PORT,
--      XSI_USB_PORT,
--      XSI_AE_PORT,
--      XSI_ETH_PORT,
--};
--
--enum {
--      XSI_PCIE0_VIP_PORT_MASK = BIT(22),
--      XSI_PCIE1_VIP_PORT_MASK = BIT(23),
--      XSI_USB_VIP_PORT_MASK   = BIT(25),
--      XSI_ETH_VIP_PORT_MASK   = BIT(24),
--};
--
--enum {
--      DEV_STATE_INITIALIZED,
--};
--
--enum {
--      CDM_CRSN_QSEL_Q1 = 1,
--      CDM_CRSN_QSEL_Q5 = 5,
--      CDM_CRSN_QSEL_Q6 = 6,
--      CDM_CRSN_QSEL_Q15 = 15,
--};
--
--enum {
--      CRSN_08 = 0x8,
--      CRSN_21 = 0x15, /* KA */
--      CRSN_22 = 0x16, /* hit bind and force route to CPU */
--      CRSN_24 = 0x18,
--      CRSN_25 = 0x19,
--};
--
--enum {
--      FE_PSE_PORT_CDM1,
--      FE_PSE_PORT_GDM1,
--      FE_PSE_PORT_GDM2,
--      FE_PSE_PORT_GDM3,
--      FE_PSE_PORT_PPE1,
--      FE_PSE_PORT_CDM2,
--      FE_PSE_PORT_CDM3,
--      FE_PSE_PORT_CDM4,
--      FE_PSE_PORT_PPE2,
--      FE_PSE_PORT_GDM4,
--      FE_PSE_PORT_CDM5,
--      FE_PSE_PORT_DROP = 0xf,
--};
--
--enum tx_sched_mode {
--      TC_SCH_WRR8,
--      TC_SCH_SP,
--      TC_SCH_WRR7,
--      TC_SCH_WRR6,
--      TC_SCH_WRR5,
--      TC_SCH_WRR4,
--      TC_SCH_WRR3,
--      TC_SCH_WRR2,
--};
--
--enum trtcm_param_type {
--      TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
--      TRTCM_TOKEN_RATE_MODE,
--      TRTCM_BUCKETSIZE_SHIFT_MODE,
--      TRTCM_BUCKET_COUNTER_MODE,
--};
--
--enum trtcm_mode_type {
--      TRTCM_COMMIT_MODE,
--      TRTCM_PEAK_MODE,
--};
--
--enum trtcm_param {
--      TRTCM_TICK_SEL = BIT(0),
--      TRTCM_PKT_MODE = BIT(1),
--      TRTCM_METER_MODE = BIT(2),
--};
--
--#define MIN_TOKEN_SIZE                                4096
--#define MAX_TOKEN_SIZE_OFFSET                 17
--#define TRTCM_TOKEN_RATE_MASK                 GENMASK(23, 6)
--#define TRTCM_TOKEN_RATE_FRACTION_MASK                GENMASK(5, 0)
--
--struct airoha_queue_entry {
--      union {
--              void *buf;
--              struct sk_buff *skb;
--      };
--      dma_addr_t dma_addr;
--      u16 dma_len;
--};
--
--struct airoha_queue {
--      struct airoha_qdma *qdma;
--
--      /* protect concurrent queue accesses */
--      spinlock_t lock;
--      struct airoha_queue_entry *entry;
--      struct airoha_qdma_desc *desc;
--      u16 head;
--      u16 tail;
--
--      int queued;
--      int ndesc;
--      int free_thr;
--      int buf_size;
--
--      struct napi_struct napi;
--      struct page_pool *page_pool;
--};
--
--struct airoha_tx_irq_queue {
--      struct airoha_qdma *qdma;
--
--      struct napi_struct napi;
--
--      int size;
--      u32 *q;
--};
--
--struct airoha_hw_stats {
--      /* protect concurrent hw_stats accesses */
--      spinlock_t lock;
--      struct u64_stats_sync syncp;
--
--      /* get_stats64 */
--      u64 rx_ok_pkts;
--      u64 tx_ok_pkts;
--      u64 rx_ok_bytes;
--      u64 tx_ok_bytes;
--      u64 rx_multicast;
--      u64 rx_errors;
--      u64 rx_drops;
--      u64 tx_drops;
--      u64 rx_crc_error;
--      u64 rx_over_errors;
--      /* ethtool stats */
--      u64 tx_broadcast;
--      u64 tx_multicast;
--      u64 tx_len[7];
--      u64 rx_broadcast;
--      u64 rx_fragment;
--      u64 rx_jabber;
--      u64 rx_len[7];
--};
--
--struct airoha_qdma {
--      struct airoha_eth *eth;
--      void __iomem *regs;
--
--      /* protect concurrent irqmask accesses */
--      spinlock_t irq_lock;
--      u32 irqmask[QDMA_INT_REG_MAX];
--      int irq;
--
--      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
--
--      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
--      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
--
--      /* descriptor and packet buffers for qdma hw forward */
--      struct {
--              void *desc;
--              void *q;
--      } hfwd;
--};
--
--struct airoha_gdm_port {
--      struct airoha_qdma *qdma;
--      struct net_device *dev;
--      int id;
--
--      struct airoha_hw_stats stats;
--
--      DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
--
--      /* qos stats counters */
--      u64 cpu_tx_packets;
--      u64 fwd_tx_packets;
--};
--
--struct airoha_eth {
--      struct device *dev;
--
--      unsigned long state;
--      void __iomem *fe_regs;
--
--      struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
--      struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
--
--      struct net_device *napi_dev;
--
--      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
--      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
--};
--
- static u32 airoha_rr(void __iomem *base, u32 offset)
- {
-       return readl(base + offset);
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -0,0 +1,251 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#ifndef AIROHA_ETH_H
-+#define AIROHA_ETH_H
-+
-+#include <linux/etherdevice.h>
-+#include <linux/iopoll.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/reset.h>
-+
-+#define AIROHA_MAX_NUM_GDM_PORTS      1
-+#define AIROHA_MAX_NUM_QDMA           2
-+#define AIROHA_MAX_NUM_RSTS           3
-+#define AIROHA_MAX_NUM_XSI_RSTS               5
-+#define AIROHA_MAX_MTU                        2000
-+#define AIROHA_MAX_PACKET_SIZE                2048
-+#define AIROHA_NUM_QOS_CHANNELS               4
-+#define AIROHA_NUM_QOS_QUEUES         8
-+#define AIROHA_NUM_TX_RING            32
-+#define AIROHA_NUM_RX_RING            32
-+#define AIROHA_NUM_NETDEV_TX_RINGS    (AIROHA_NUM_TX_RING + \
-+                                       AIROHA_NUM_QOS_CHANNELS)
-+#define AIROHA_FE_MC_MAX_VLAN_TABLE   64
-+#define AIROHA_FE_MC_MAX_VLAN_PORT    16
-+#define AIROHA_NUM_TX_IRQ             2
-+#define HW_DSCP_NUM                   2048
-+#define IRQ_QUEUE_LEN(_n)             ((_n) ? 1024 : 2048)
-+#define TX_DSCP_NUM                   1024
-+#define RX_DSCP_NUM(_n)                       \
-+      ((_n) ==  2 ? 128 :             \
-+       (_n) == 11 ? 128 :             \
-+       (_n) == 15 ? 128 :             \
-+       (_n) ==  0 ? 1024 : 16)
-+
-+#define PSE_RSV_PAGES                 128
-+#define PSE_QUEUE_RSV_PAGES           64
-+
-+#define QDMA_METER_IDX(_n)            ((_n) & 0xff)
-+#define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+
-+enum {
-+      QDMA_INT_REG_IDX0,
-+      QDMA_INT_REG_IDX1,
-+      QDMA_INT_REG_IDX2,
-+      QDMA_INT_REG_IDX3,
-+      QDMA_INT_REG_IDX4,
-+      QDMA_INT_REG_MAX
-+};
-+
-+enum {
-+      XSI_PCIE0_PORT,
-+      XSI_PCIE1_PORT,
-+      XSI_USB_PORT,
-+      XSI_AE_PORT,
-+      XSI_ETH_PORT,
-+};
-+
-+enum {
-+      XSI_PCIE0_VIP_PORT_MASK = BIT(22),
-+      XSI_PCIE1_VIP_PORT_MASK = BIT(23),
-+      XSI_USB_VIP_PORT_MASK   = BIT(25),
-+      XSI_ETH_VIP_PORT_MASK   = BIT(24),
-+};
-+
-+enum {
-+      DEV_STATE_INITIALIZED,
-+};
-+
-+enum {
-+      CDM_CRSN_QSEL_Q1 = 1,
-+      CDM_CRSN_QSEL_Q5 = 5,
-+      CDM_CRSN_QSEL_Q6 = 6,
-+      CDM_CRSN_QSEL_Q15 = 15,
-+};
-+
-+enum {
-+      CRSN_08 = 0x8,
-+      CRSN_21 = 0x15, /* KA */
-+      CRSN_22 = 0x16, /* hit bind and force route to CPU */
-+      CRSN_24 = 0x18,
-+      CRSN_25 = 0x19,
-+};
-+
-+enum {
-+      FE_PSE_PORT_CDM1,
-+      FE_PSE_PORT_GDM1,
-+      FE_PSE_PORT_GDM2,
-+      FE_PSE_PORT_GDM3,
-+      FE_PSE_PORT_PPE1,
-+      FE_PSE_PORT_CDM2,
-+      FE_PSE_PORT_CDM3,
-+      FE_PSE_PORT_CDM4,
-+      FE_PSE_PORT_PPE2,
-+      FE_PSE_PORT_GDM4,
-+      FE_PSE_PORT_CDM5,
-+      FE_PSE_PORT_DROP = 0xf,
-+};
-+
-+enum tx_sched_mode {
-+      TC_SCH_WRR8,
-+      TC_SCH_SP,
-+      TC_SCH_WRR7,
-+      TC_SCH_WRR6,
-+      TC_SCH_WRR5,
-+      TC_SCH_WRR4,
-+      TC_SCH_WRR3,
-+      TC_SCH_WRR2,
-+};
-+
-+enum trtcm_param_type {
-+      TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
-+      TRTCM_TOKEN_RATE_MODE,
-+      TRTCM_BUCKETSIZE_SHIFT_MODE,
-+      TRTCM_BUCKET_COUNTER_MODE,
-+};
-+
-+enum trtcm_mode_type {
-+      TRTCM_COMMIT_MODE,
-+      TRTCM_PEAK_MODE,
-+};
-+
-+enum trtcm_param {
-+      TRTCM_TICK_SEL = BIT(0),
-+      TRTCM_PKT_MODE = BIT(1),
-+      TRTCM_METER_MODE = BIT(2),
-+};
-+
-+#define MIN_TOKEN_SIZE                                4096
-+#define MAX_TOKEN_SIZE_OFFSET                 17
-+#define TRTCM_TOKEN_RATE_MASK                 GENMASK(23, 6)
-+#define TRTCM_TOKEN_RATE_FRACTION_MASK                GENMASK(5, 0)
-+
-+struct airoha_queue_entry {
-+      union {
-+              void *buf;
-+              struct sk_buff *skb;
-+      };
-+      dma_addr_t dma_addr;
-+      u16 dma_len;
-+};
-+
-+struct airoha_queue {
-+      struct airoha_qdma *qdma;
-+
-+      /* protect concurrent queue accesses */
-+      spinlock_t lock;
-+      struct airoha_queue_entry *entry;
-+      struct airoha_qdma_desc *desc;
-+      u16 head;
-+      u16 tail;
-+
-+      int queued;
-+      int ndesc;
-+      int free_thr;
-+      int buf_size;
-+
-+      struct napi_struct napi;
-+      struct page_pool *page_pool;
-+};
-+
-+struct airoha_tx_irq_queue {
-+      struct airoha_qdma *qdma;
-+
-+      struct napi_struct napi;
-+
-+      int size;
-+      u32 *q;
-+};
-+
-+struct airoha_hw_stats {
-+      /* protect concurrent hw_stats accesses */
-+      spinlock_t lock;
-+      struct u64_stats_sync syncp;
-+
-+      /* get_stats64 */
-+      u64 rx_ok_pkts;
-+      u64 tx_ok_pkts;
-+      u64 rx_ok_bytes;
-+      u64 tx_ok_bytes;
-+      u64 rx_multicast;
-+      u64 rx_errors;
-+      u64 rx_drops;
-+      u64 tx_drops;
-+      u64 rx_crc_error;
-+      u64 rx_over_errors;
-+      /* ethtool stats */
-+      u64 tx_broadcast;
-+      u64 tx_multicast;
-+      u64 tx_len[7];
-+      u64 rx_broadcast;
-+      u64 rx_fragment;
-+      u64 rx_jabber;
-+      u64 rx_len[7];
-+};
-+
-+struct airoha_qdma {
-+      struct airoha_eth *eth;
-+      void __iomem *regs;
-+
-+      /* protect concurrent irqmask accesses */
-+      spinlock_t irq_lock;
-+      u32 irqmask[QDMA_INT_REG_MAX];
-+      int irq;
-+
-+      struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-+
-+      struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-+      struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-+
-+      /* descriptor and packet buffers for qdma hw forward */
-+      struct {
-+              void *desc;
-+              void *q;
-+      } hfwd;
-+};
-+
-+struct airoha_gdm_port {
-+      struct airoha_qdma *qdma;
-+      struct net_device *dev;
-+      int id;
-+
-+      struct airoha_hw_stats stats;
-+
-+      DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-+
-+      /* qos stats counters */
-+      u64 cpu_tx_packets;
-+      u64 fwd_tx_packets;
-+};
-+
-+struct airoha_eth {
-+      struct device *dev;
-+
-+      unsigned long state;
-+      void __iomem *fe_regs;
-+
-+      struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-+      struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-+
-+      struct net_device *napi_dev;
-+
-+      struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-+      struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-+};
-+
-+#endif /* AIROHA_ETH_H */
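The description in the patch above gives the rationale for the move: once these definitions live in airoha_eth.h behind the AIROHA_ETH_H guard, other compilation units of the driver (the PPE flowtable-offload code it mentions) can share them instead of duplicating them. A hedged sketch of such a consumer, with an assumed file name that is not part of this patch:

/* hypothetical drivers/net/ethernet/airoha/airoha_ppe_example.c */
#include "airoha_eth.h"

/* Structure layouts defined once in airoha_eth.h are visible here exactly
 * as in airoha_eth.c; no local copies of the limits or structs are needed.
 */
static struct airoha_eth *example_eth_from_port(struct airoha_gdm_port *port)
{
	return port->qdma ? port->qdma->eth : NULL;
}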
diff --git a/target/linux/airoha/patches-6.6/048-03-v6.15-net-airoha-Move-reg-write-utility-routines-in-airoha.patch b/target/linux/airoha/patches-6.6/048-03-v6.15-net-airoha-Move-reg-write-utility-routines-in-airoha.patch
deleted file mode 100644 (file)
index bf24638..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-From e0758a8694fbaffdc72940774db295585e951119 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:11 +0100
-Subject: [PATCH 03/15] net: airoha: Move reg/write utility routines in
- airoha_eth.h
-
-This is a preliminary patch to introduce flowtable hw offloading
-support for airoha_eth driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 28 +++---------------------
- drivers/net/ethernet/airoha/airoha_eth.h | 26 ++++++++++++++++++++++
- 2 files changed, 29 insertions(+), 25 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -673,17 +673,17 @@ struct airoha_qdma_fwd_desc {
-       __le32 rsv1;
- };
--static u32 airoha_rr(void __iomem *base, u32 offset)
-+u32 airoha_rr(void __iomem *base, u32 offset)
- {
-       return readl(base + offset);
- }
--static void airoha_wr(void __iomem *base, u32 offset, u32 val)
-+void airoha_wr(void __iomem *base, u32 offset, u32 val)
- {
-       writel(val, base + offset);
- }
--static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
-+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
- {
-       val |= (airoha_rr(base, offset) & ~mask);
-       airoha_wr(base, offset, val);
-@@ -691,28 +691,6 @@ static u32 airoha_rmw(void __iomem *base
-       return val;
- }
--#define airoha_fe_rr(eth, offset)                             \
--      airoha_rr((eth)->fe_regs, (offset))
--#define airoha_fe_wr(eth, offset, val)                                \
--      airoha_wr((eth)->fe_regs, (offset), (val))
--#define airoha_fe_rmw(eth, offset, mask, val)                 \
--      airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
--#define airoha_fe_set(eth, offset, val)                               \
--      airoha_rmw((eth)->fe_regs, (offset), 0, (val))
--#define airoha_fe_clear(eth, offset, val)                     \
--      airoha_rmw((eth)->fe_regs, (offset), (val), 0)
--
--#define airoha_qdma_rr(qdma, offset)                          \
--      airoha_rr((qdma)->regs, (offset))
--#define airoha_qdma_wr(qdma, offset, val)                     \
--      airoha_wr((qdma)->regs, (offset), (val))
--#define airoha_qdma_rmw(qdma, offset, mask, val)              \
--      airoha_rmw((qdma)->regs, (offset), (mask), (val))
--#define airoha_qdma_set(qdma, offset, val)                    \
--      airoha_rmw((qdma)->regs, (offset), 0, (val))
--#define airoha_qdma_clear(qdma, offset, val)                  \
--      airoha_rmw((qdma)->regs, (offset), (val), 0)
--
- static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
-                                   u32 clear, u32 set)
- {
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -248,4 +248,30 @@ struct airoha_eth {
-       struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
- };
-+u32 airoha_rr(void __iomem *base, u32 offset);
-+void airoha_wr(void __iomem *base, u32 offset, u32 val);
-+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
-+
-+#define airoha_fe_rr(eth, offset)                             \
-+      airoha_rr((eth)->fe_regs, (offset))
-+#define airoha_fe_wr(eth, offset, val)                                \
-+      airoha_wr((eth)->fe_regs, (offset), (val))
-+#define airoha_fe_rmw(eth, offset, mask, val)                 \
-+      airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-+#define airoha_fe_set(eth, offset, val)                               \
-+      airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-+#define airoha_fe_clear(eth, offset, val)                     \
-+      airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-+
-+#define airoha_qdma_rr(qdma, offset)                          \
-+      airoha_rr((qdma)->regs, (offset))
-+#define airoha_qdma_wr(qdma, offset, val)                     \
-+      airoha_wr((qdma)->regs, (offset), (val))
-+#define airoha_qdma_rmw(qdma, offset, mask, val)              \
-+      airoha_rmw((qdma)->regs, (offset), (mask), (val))
-+#define airoha_qdma_set(qdma, offset, val)                    \
-+      airoha_rmw((qdma)->regs, (offset), 0, (val))
-+#define airoha_qdma_clear(qdma, offset, val)                  \
-+      airoha_rmw((qdma)->regs, (offset), (val), 0)
-+
- #endif /* AIROHA_ETH_H */
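The helpers exported by the patch above follow a read-modify-write convention: airoha_rmw() keeps every bit outside mask and writes val into the masked field, while the airoha_fe_*/airoha_qdma_* macros only select which register block (frame engine or a QDMA instance) the access targets. A hedged usage sketch follows; the function below is an editor's illustration rather than driver code, and the register and mask names are taken from the definitions moved into airoha_regs.h later in this series:

#include <linux/bitfield.h>
#include "airoha_eth.h"
#include "airoha_regs.h"

/* Update only the short/long frame-length fields of a GDM block, leaving
 * the remaining bits of REG_GDM_LEN_CFG untouched.
 */
static void example_set_gdm_frame_len(struct airoha_eth *eth, int gdm,
				      u16 shortest, u16 longest)
{
	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(gdm),
		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
		      FIELD_PREP(GDM_SHORT_LEN_MASK, shortest) |
		      FIELD_PREP(GDM_LONG_LEN_MASK, longest));
}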
diff --git a/target/linux/airoha/patches-6.6/048-04-v6.15-net-airoha-Move-register-definitions-in-airoha_regs..patch b/target/linux/airoha/patches-6.6/048-04-v6.15-net-airoha-Move-register-definitions-in-airoha_regs..patch
deleted file mode 100644 (file)
index 3b2b8bf..0000000
+++ /dev/null
@@ -1,1361 +0,0 @@
-From ec663d9a82bf4d16721f6b1fc29df4892ba6c088 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:12 +0100
-Subject: [PATCH 04/15] net: airoha: Move register definitions in airoha_regs.h
-
-Move common airoha_eth register definitions in airoha_regs.h in order
-to reuse them for Packet Processor Engine (PPE) codebase.
-PPE module is used to enable support for flowtable hw offloading in
-airoha_eth driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 659 +--------------------
- drivers/net/ethernet/airoha/airoha_regs.h | 670 ++++++++++++++++++++++
- 2 files changed, 671 insertions(+), 658 deletions(-)
- create mode 100644 drivers/net/ethernet/airoha/airoha_regs.h
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -13,666 +13,9 @@
- #include <net/pkt_cls.h>
- #include <uapi/linux/ppp_defs.h>
-+#include "airoha_regs.h"
- #include "airoha_eth.h"
--/* FE */
--#define PSE_BASE                      0x0100
--#define CSR_IFC_BASE                  0x0200
--#define CDM1_BASE                     0x0400
--#define GDM1_BASE                     0x0500
--#define PPE1_BASE                     0x0c00
--
--#define CDM2_BASE                     0x1400
--#define GDM2_BASE                     0x1500
--
--#define GDM3_BASE                     0x1100
--#define GDM4_BASE                     0x2500
--
--#define GDM_BASE(_n)                  \
--      ((_n) == 4 ? GDM4_BASE :        \
--       (_n) == 3 ? GDM3_BASE :        \
--       (_n) == 2 ? GDM2_BASE : GDM1_BASE)
--
--#define REG_FE_DMA_GLO_CFG            0x0000
--#define FE_DMA_GLO_L2_SPACE_MASK      GENMASK(7, 4)
--#define FE_DMA_GLO_PG_SZ_MASK         BIT(3)
--
--#define REG_FE_RST_GLO_CFG            0x0004
--#define FE_RST_GDM4_MBI_ARB_MASK      BIT(3)
--#define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
--#define FE_RST_CORE_MASK              BIT(0)
--
--#define REG_FE_WAN_MAC_H              0x0030
--#define REG_FE_LAN_MAC_H              0x0040
--
--#define REG_FE_MAC_LMIN(_n)           ((_n) + 0x04)
--#define REG_FE_MAC_LMAX(_n)           ((_n) + 0x08)
--
--#define REG_FE_CDM1_OQ_MAP0           0x0050
--#define REG_FE_CDM1_OQ_MAP1           0x0054
--#define REG_FE_CDM1_OQ_MAP2           0x0058
--#define REG_FE_CDM1_OQ_MAP3           0x005c
--
--#define REG_FE_PCE_CFG                        0x0070
--#define PCE_DPI_EN_MASK                       BIT(2)
--#define PCE_KA_EN_MASK                        BIT(1)
--#define PCE_MC_EN_MASK                        BIT(0)
--
--#define REG_FE_PSE_QUEUE_CFG_WR               0x0080
--#define PSE_CFG_PORT_ID_MASK          GENMASK(27, 24)
--#define PSE_CFG_QUEUE_ID_MASK         GENMASK(20, 16)
--#define PSE_CFG_WR_EN_MASK            BIT(8)
--#define PSE_CFG_OQRSV_SEL_MASK                BIT(0)
--
--#define REG_FE_PSE_QUEUE_CFG_VAL      0x0084
--#define PSE_CFG_OQ_RSV_MASK           GENMASK(13, 0)
--
--#define PSE_FQ_CFG                    0x008c
--#define PSE_FQ_LIMIT_MASK             GENMASK(14, 0)
--
--#define REG_FE_PSE_BUF_SET            0x0090
--#define PSE_SHARE_USED_LTHD_MASK      GENMASK(31, 16)
--#define PSE_ALLRSV_MASK                       GENMASK(14, 0)
--
--#define REG_PSE_SHARE_USED_THD                0x0094
--#define PSE_SHARE_USED_MTHD_MASK      GENMASK(31, 16)
--#define PSE_SHARE_USED_HTHD_MASK      GENMASK(15, 0)
--
--#define REG_GDM_MISC_CFG              0x0148
--#define GDM2_RDM_ACK_WAIT_PREF_MASK   BIT(9)
--#define GDM2_CHN_VLD_MODE_MASK                BIT(5)
--
--#define REG_FE_CSR_IFC_CFG            CSR_IFC_BASE
--#define FE_IFC_EN_MASK                        BIT(0)
--
--#define REG_FE_VIP_PORT_EN            0x01f0
--#define REG_FE_IFC_PORT_EN            0x01f4
--
--#define REG_PSE_IQ_REV1                       (PSE_BASE + 0x08)
--#define PSE_IQ_RES1_P2_MASK           GENMASK(23, 16)
--
--#define REG_PSE_IQ_REV2                       (PSE_BASE + 0x0c)
--#define PSE_IQ_RES2_P5_MASK           GENMASK(15, 8)
--#define PSE_IQ_RES2_P4_MASK           GENMASK(7, 0)
--
--#define REG_FE_VIP_EN(_n)             (0x0300 + ((_n) << 3))
--#define PATN_FCPU_EN_MASK             BIT(7)
--#define PATN_SWP_EN_MASK              BIT(6)
--#define PATN_DP_EN_MASK                       BIT(5)
--#define PATN_SP_EN_MASK                       BIT(4)
--#define PATN_TYPE_MASK                        GENMASK(3, 1)
--#define PATN_EN_MASK                  BIT(0)
--
--#define REG_FE_VIP_PATN(_n)           (0x0304 + ((_n) << 3))
--#define PATN_DP_MASK                  GENMASK(31, 16)
--#define PATN_SP_MASK                  GENMASK(15, 0)
--
--#define REG_CDM1_VLAN_CTRL            CDM1_BASE
--#define CDM1_VLAN_MASK                        GENMASK(31, 16)
--
--#define REG_CDM1_FWD_CFG              (CDM1_BASE + 0x08)
--#define CDM1_VIP_QSEL_MASK            GENMASK(24, 20)
--
--#define REG_CDM1_CRSN_QSEL(_n)                (CDM1_BASE + 0x10 + ((_n) << 2))
--#define CDM1_CRSN_QSEL_REASON_MASK(_n)        \
--      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
--
--#define REG_CDM2_FWD_CFG              (CDM2_BASE + 0x08)
--#define CDM2_OAM_QSEL_MASK            GENMASK(31, 27)
--#define CDM2_VIP_QSEL_MASK            GENMASK(24, 20)
--
--#define REG_CDM2_CRSN_QSEL(_n)                (CDM2_BASE + 0x10 + ((_n) << 2))
--#define CDM2_CRSN_QSEL_REASON_MASK(_n)        \
--      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
--
--#define REG_GDM_FWD_CFG(_n)           GDM_BASE(_n)
--#define GDM_DROP_CRC_ERR              BIT(23)
--#define GDM_IP4_CKSUM                 BIT(22)
--#define GDM_TCP_CKSUM                 BIT(21)
--#define GDM_UDP_CKSUM                 BIT(20)
--#define GDM_UCFQ_MASK                 GENMASK(15, 12)
--#define GDM_BCFQ_MASK                 GENMASK(11, 8)
--#define GDM_MCFQ_MASK                 GENMASK(7, 4)
--#define GDM_OCFQ_MASK                 GENMASK(3, 0)
--
--#define REG_GDM_INGRESS_CFG(_n)               (GDM_BASE(_n) + 0x10)
--#define GDM_INGRESS_FC_EN_MASK                BIT(1)
--#define GDM_STAG_EN_MASK              BIT(0)
--
--#define REG_GDM_LEN_CFG(_n)           (GDM_BASE(_n) + 0x14)
--#define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
--#define GDM_LONG_LEN_MASK             GENMASK(29, 16)
--
--#define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
--#define FE_CPORT_PAD                  BIT(26)
--#define FE_CPORT_PORT_XFC_MASK                BIT(25)
--#define FE_CPORT_QUEUE_XFC_MASK               BIT(24)
--
--#define REG_FE_GDM_MIB_CLEAR(_n)      (GDM_BASE(_n) + 0xf0)
--#define FE_GDM_MIB_RX_CLEAR_MASK      BIT(1)
--#define FE_GDM_MIB_TX_CLEAR_MASK      BIT(0)
--
--#define REG_FE_GDM1_MIB_CFG           (GDM1_BASE + 0xf4)
--#define FE_STRICT_RFC2819_MODE_MASK   BIT(31)
--#define FE_GDM1_TX_MIB_SPLIT_EN_MASK  BIT(17)
--#define FE_GDM1_RX_MIB_SPLIT_EN_MASK  BIT(16)
--#define FE_TX_MIB_ID_MASK             GENMASK(15, 8)
--#define FE_RX_MIB_ID_MASK             GENMASK(7, 0)
--
--#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x104)
--#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x10c)
--#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x110)
--#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x114)
--#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x118)
--#define REG_FE_GDM_TX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x11c)
--#define REG_FE_GDM_TX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x120)
--#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x124)
--#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x128)
--#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x12c)
--#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x130)
--#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x134)
--#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x138)
--#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x13c)
--#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x140)
--
--#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x148)
--#define REG_FE_GDM_RX_FC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x14c)
--#define REG_FE_GDM_RX_RC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x150)
--#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)   (GDM_BASE(_n) + 0x154)
--#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)      (GDM_BASE(_n) + 0x158)
--#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x15c)
--#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x160)
--#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x164)
--#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x168)
--#define REG_FE_GDM_RX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x16c)
--#define REG_FE_GDM_RX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x170)
--#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)     (GDM_BASE(_n) + 0x174)
--#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)                (GDM_BASE(_n) + 0x178)
--#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)      (GDM_BASE(_n) + 0x17c)
--#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x180)
--#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x184)
--#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x188)
--#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x18c)
--#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x190)
--#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x194)
--#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
--#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
--
--#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
--#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
--#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
--#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
--#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
--
--#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
--#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
--#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
--#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x28c)
--
--#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x290)
--#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x294)
--#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x298)
--#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x29c)
--#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2b8)
--#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2bc)
--#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2c0)
--#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2c4)
--#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2c8)
--#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2cc)
--#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2e8)
--#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2ec)
--#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2f0)
--#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2f4)
--#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2f8)
--#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
--
--#define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
--#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25)
--#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
--
--#define REG_GDM3_FWD_CFG              GDM3_BASE
--#define GDM3_PAD_EN_MASK              BIT(28)
--
--#define REG_GDM4_FWD_CFG              GDM4_BASE
--#define GDM4_PAD_EN_MASK              BIT(28)
--#define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
--
--#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c)
--#define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
--#define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
--#define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
--
--#define REG_IP_FRAG_FP                        0x2010
--#define IP_ASSEMBLE_PORT_MASK         GENMASK(24, 21)
--#define IP_ASSEMBLE_NBQ_MASK          GENMASK(20, 16)
--#define IP_FRAGMENT_PORT_MASK         GENMASK(8, 5)
--#define IP_FRAGMENT_NBQ_MASK          GENMASK(4, 0)
--
--#define REG_MC_VLAN_EN                        0x2100
--#define MC_VLAN_EN_MASK                       BIT(0)
--
--#define REG_MC_VLAN_CFG                       0x2104
--#define MC_VLAN_CFG_CMD_DONE_MASK     BIT(31)
--#define MC_VLAN_CFG_TABLE_ID_MASK     GENMASK(21, 16)
--#define MC_VLAN_CFG_PORT_ID_MASK      GENMASK(11, 8)
--#define MC_VLAN_CFG_TABLE_SEL_MASK    BIT(4)
--#define MC_VLAN_CFG_RW_MASK           BIT(0)
--
--#define REG_MC_VLAN_DATA              0x2108
--
--#define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
--
--/* QDMA */
--#define REG_QDMA_GLOBAL_CFG                   0x0004
--#define GLOBAL_CFG_RX_2B_OFFSET_MASK          BIT(31)
--#define GLOBAL_CFG_DMA_PREFERENCE_MASK                GENMASK(30, 29)
--#define GLOBAL_CFG_CPU_TXR_RR_MASK            BIT(28)
--#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK                BIT(27)
--#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK     BIT(26)
--#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK   BIT(25)
--#define GLOBAL_CFG_OAM_MODIFY_MASK            BIT(24)
--#define GLOBAL_CFG_RESET_MASK                 BIT(23)
--#define GLOBAL_CFG_RESET_DONE_MASK            BIT(22)
--#define GLOBAL_CFG_MULTICAST_EN_MASK          BIT(21)
--#define GLOBAL_CFG_IRQ1_EN_MASK                       BIT(20)
--#define GLOBAL_CFG_IRQ0_EN_MASK                       BIT(19)
--#define GLOBAL_CFG_LOOPCNT_EN_MASK            BIT(18)
--#define GLOBAL_CFG_RD_BYPASS_WR_MASK          BIT(17)
--#define GLOBAL_CFG_QDMA_LOOPBACK_MASK         BIT(16)
--#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK          GENMASK(13, 8)
--#define GLOBAL_CFG_CHECK_DONE_MASK            BIT(7)
--#define GLOBAL_CFG_TX_WB_DONE_MASK            BIT(6)
--#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK         GENMASK(5, 4)
--#define GLOBAL_CFG_RX_DMA_BUSY_MASK           BIT(3)
--#define GLOBAL_CFG_RX_DMA_EN_MASK             BIT(2)
--#define GLOBAL_CFG_TX_DMA_BUSY_MASK           BIT(1)
--#define GLOBAL_CFG_TX_DMA_EN_MASK             BIT(0)
--
--#define REG_FWD_DSCP_BASE                     0x0010
--#define REG_FWD_BUF_BASE                      0x0014
--
--#define REG_HW_FWD_DSCP_CFG                   0x0018
--#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK         GENMASK(29, 28)
--#define HW_FWD_DSCP_SCATTER_LEN_MASK          GENMASK(17, 16)
--#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK      GENMASK(15, 0)
--
--#define REG_INT_STATUS(_n)            \
--      (((_n) == 4) ? 0x0730 :         \
--       ((_n) == 3) ? 0x0724 :         \
--       ((_n) == 2) ? 0x0720 :         \
--       ((_n) == 1) ? 0x0024 : 0x0020)
--
--#define REG_INT_ENABLE(_n)            \
--      (((_n) == 4) ? 0x0750 :         \
--       ((_n) == 3) ? 0x0744 :         \
--       ((_n) == 2) ? 0x0740 :         \
--       ((_n) == 1) ? 0x002c : 0x0028)
--
--/* QDMA_CSR_INT_ENABLE1 */
--#define RX15_COHERENT_INT_MASK                BIT(31)
--#define RX14_COHERENT_INT_MASK                BIT(30)
--#define RX13_COHERENT_INT_MASK                BIT(29)
--#define RX12_COHERENT_INT_MASK                BIT(28)
--#define RX11_COHERENT_INT_MASK                BIT(27)
--#define RX10_COHERENT_INT_MASK                BIT(26)
--#define RX9_COHERENT_INT_MASK         BIT(25)
--#define RX8_COHERENT_INT_MASK         BIT(24)
--#define RX7_COHERENT_INT_MASK         BIT(23)
--#define RX6_COHERENT_INT_MASK         BIT(22)
--#define RX5_COHERENT_INT_MASK         BIT(21)
--#define RX4_COHERENT_INT_MASK         BIT(20)
--#define RX3_COHERENT_INT_MASK         BIT(19)
--#define RX2_COHERENT_INT_MASK         BIT(18)
--#define RX1_COHERENT_INT_MASK         BIT(17)
--#define RX0_COHERENT_INT_MASK         BIT(16)
--#define TX7_COHERENT_INT_MASK         BIT(15)
--#define TX6_COHERENT_INT_MASK         BIT(14)
--#define TX5_COHERENT_INT_MASK         BIT(13)
--#define TX4_COHERENT_INT_MASK         BIT(12)
--#define TX3_COHERENT_INT_MASK         BIT(11)
--#define TX2_COHERENT_INT_MASK         BIT(10)
--#define TX1_COHERENT_INT_MASK         BIT(9)
--#define TX0_COHERENT_INT_MASK         BIT(8)
--#define CNT_OVER_FLOW_INT_MASK                BIT(7)
--#define IRQ1_FULL_INT_MASK            BIT(5)
--#define IRQ1_INT_MASK                 BIT(4)
--#define HWFWD_DSCP_LOW_INT_MASK               BIT(3)
--#define HWFWD_DSCP_EMPTY_INT_MASK     BIT(2)
--#define IRQ0_FULL_INT_MASK            BIT(1)
--#define IRQ0_INT_MASK                 BIT(0)
--
--#define TX_DONE_INT_MASK(_n)                                  \
--      ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
--            : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
--
--#define INT_TX_MASK                                           \
--      (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
--       IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
--
--#define INT_IDX0_MASK                                         \
--      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
--       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
--       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
--       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
--       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
--       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
--       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
--       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
--       RX15_COHERENT_INT_MASK | INT_TX_MASK)
--
--/* QDMA_CSR_INT_ENABLE2 */
--#define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
--#define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
--#define RX13_NO_CPU_DSCP_INT_MASK     BIT(29)
--#define RX12_NO_CPU_DSCP_INT_MASK     BIT(28)
--#define RX11_NO_CPU_DSCP_INT_MASK     BIT(27)
--#define RX10_NO_CPU_DSCP_INT_MASK     BIT(26)
--#define RX9_NO_CPU_DSCP_INT_MASK      BIT(25)
--#define RX8_NO_CPU_DSCP_INT_MASK      BIT(24)
--#define RX7_NO_CPU_DSCP_INT_MASK      BIT(23)
--#define RX6_NO_CPU_DSCP_INT_MASK      BIT(22)
--#define RX5_NO_CPU_DSCP_INT_MASK      BIT(21)
--#define RX4_NO_CPU_DSCP_INT_MASK      BIT(20)
--#define RX3_NO_CPU_DSCP_INT_MASK      BIT(19)
--#define RX2_NO_CPU_DSCP_INT_MASK      BIT(18)
--#define RX1_NO_CPU_DSCP_INT_MASK      BIT(17)
--#define RX0_NO_CPU_DSCP_INT_MASK      BIT(16)
--#define RX15_DONE_INT_MASK            BIT(15)
--#define RX14_DONE_INT_MASK            BIT(14)
--#define RX13_DONE_INT_MASK            BIT(13)
--#define RX12_DONE_INT_MASK            BIT(12)
--#define RX11_DONE_INT_MASK            BIT(11)
--#define RX10_DONE_INT_MASK            BIT(10)
--#define RX9_DONE_INT_MASK             BIT(9)
--#define RX8_DONE_INT_MASK             BIT(8)
--#define RX7_DONE_INT_MASK             BIT(7)
--#define RX6_DONE_INT_MASK             BIT(6)
--#define RX5_DONE_INT_MASK             BIT(5)
--#define RX4_DONE_INT_MASK             BIT(4)
--#define RX3_DONE_INT_MASK             BIT(3)
--#define RX2_DONE_INT_MASK             BIT(2)
--#define RX1_DONE_INT_MASK             BIT(1)
--#define RX0_DONE_INT_MASK             BIT(0)
--
--#define RX_DONE_INT_MASK                                      \
--      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
--       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
--       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
--       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
--       RX15_DONE_INT_MASK)
--#define INT_IDX1_MASK                                         \
--      (RX_DONE_INT_MASK |                                     \
--       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
--       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
--       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
--       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
--       RX15_NO_CPU_DSCP_INT_MASK)
--
--/* QDMA_CSR_INT_ENABLE5 */
--#define TX31_COHERENT_INT_MASK                BIT(31)
--#define TX30_COHERENT_INT_MASK                BIT(30)
--#define TX29_COHERENT_INT_MASK                BIT(29)
--#define TX28_COHERENT_INT_MASK                BIT(28)
--#define TX27_COHERENT_INT_MASK                BIT(27)
--#define TX26_COHERENT_INT_MASK                BIT(26)
--#define TX25_COHERENT_INT_MASK                BIT(25)
--#define TX24_COHERENT_INT_MASK                BIT(24)
--#define TX23_COHERENT_INT_MASK                BIT(23)
--#define TX22_COHERENT_INT_MASK                BIT(22)
--#define TX21_COHERENT_INT_MASK                BIT(21)
--#define TX20_COHERENT_INT_MASK                BIT(20)
--#define TX19_COHERENT_INT_MASK                BIT(19)
--#define TX18_COHERENT_INT_MASK                BIT(18)
--#define TX17_COHERENT_INT_MASK                BIT(17)
--#define TX16_COHERENT_INT_MASK                BIT(16)
--#define TX15_COHERENT_INT_MASK                BIT(15)
--#define TX14_COHERENT_INT_MASK                BIT(14)
--#define TX13_COHERENT_INT_MASK                BIT(13)
--#define TX12_COHERENT_INT_MASK                BIT(12)
--#define TX11_COHERENT_INT_MASK                BIT(11)
--#define TX10_COHERENT_INT_MASK                BIT(10)
--#define TX9_COHERENT_INT_MASK         BIT(9)
--#define TX8_COHERENT_INT_MASK         BIT(8)
--
--#define INT_IDX4_MASK                                         \
--      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
--       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
--       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
--       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
--       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
--       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
--       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
--       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
--       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
--       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
--       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
--       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
--
--#define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
--
--#define REG_TX_IRQ_CFG(_n)            ((_n) ? 0x004c : 0x0054)
--#define TX_IRQ_THR_MASK                       GENMASK(27, 16)
--#define TX_IRQ_DEPTH_MASK             GENMASK(11, 0)
--
--#define REG_IRQ_CLEAR_LEN(_n)         ((_n) ? 0x0064 : 0x0058)
--#define IRQ_CLEAR_LEN_MASK            GENMASK(7, 0)
--
--#define REG_IRQ_STATUS(_n)            ((_n) ? 0x0068 : 0x005c)
--#define IRQ_ENTRY_LEN_MASK            GENMASK(27, 16)
--#define IRQ_HEAD_IDX_MASK             GENMASK(11, 0)
--
--#define REG_TX_RING_BASE(_n)  \
--      (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
--
--#define REG_TX_RING_BLOCKING(_n)      \
--      (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
--
--#define TX_RING_IRQ_BLOCKING_MAP_MASK                 BIT(6)
--#define TX_RING_IRQ_BLOCKING_CFG_MASK                 BIT(4)
--#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK          BIT(2)
--#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK    BIT(1)
--#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK    BIT(0)
--
--#define REG_TX_CPU_IDX(_n)    \
--      (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
--
--#define TX_RING_CPU_IDX_MASK          GENMASK(15, 0)
--
--#define REG_TX_DMA_IDX(_n)    \
--      (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
--
--#define TX_RING_DMA_IDX_MASK          GENMASK(15, 0)
--
--#define IRQ_RING_IDX_MASK             GENMASK(20, 16)
--#define IRQ_DESC_IDX_MASK             GENMASK(15, 0)
--
--#define REG_RX_RING_BASE(_n)  \
--      (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
--
--#define REG_RX_RING_SIZE(_n)  \
--      (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
--
--#define RX_RING_THR_MASK              GENMASK(31, 16)
--#define RX_RING_SIZE_MASK             GENMASK(15, 0)
--
--#define REG_RX_CPU_IDX(_n)    \
--      (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
--
--#define RX_RING_CPU_IDX_MASK          GENMASK(15, 0)
--
--#define REG_RX_DMA_IDX(_n)    \
--      (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
--
--#define REG_RX_DELAY_INT_IDX(_n)      \
--      (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
--
--#define RX_DELAY_INT_MASK             GENMASK(15, 0)
--
--#define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
--
--#define REG_INGRESS_TRTCM_CFG         0x0070
--#define INGRESS_TRTCM_EN_MASK         BIT(31)
--#define INGRESS_TRTCM_MODE_MASK               BIT(30)
--#define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
--#define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
--
--#define REG_QUEUE_CLOSE_CFG(_n)               (0x00a0 + ((_n) & 0xfc))
--#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)   BIT((_m) + (((_n) & 0x3) << 3))
--
--#define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
--#define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
--
--#define REG_CNTR_CFG(_n)              (0x0400 + ((_n) << 3))
--#define CNTR_EN_MASK                  BIT(31)
--#define CNTR_ALL_CHAN_EN_MASK         BIT(30)
--#define CNTR_ALL_QUEUE_EN_MASK                BIT(29)
--#define CNTR_ALL_DSCP_RING_EN_MASK    BIT(28)
--#define CNTR_SRC_MASK                 GENMASK(27, 24)
--#define CNTR_DSCP_RING_MASK           GENMASK(20, 16)
--#define CNTR_CHAN_MASK                        GENMASK(7, 3)
--#define CNTR_QUEUE_MASK                       GENMASK(2, 0)
--
--#define REG_CNTR_VAL(_n)              (0x0404 + ((_n) << 3))
--
--#define REG_LMGR_INIT_CFG             0x1000
--#define LMGR_INIT_START                       BIT(31)
--#define LMGR_SRAM_MODE_MASK           BIT(30)
--#define HW_FWD_PKTSIZE_OVERHEAD_MASK  GENMASK(27, 20)
--#define HW_FWD_DESC_NUM_MASK          GENMASK(16, 0)
--
--#define REG_FWD_DSCP_LOW_THR          0x1004
--#define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
--
--#define REG_EGRESS_RATE_METER_CFG             0x100c
--#define EGRESS_RATE_METER_EN_MASK             BIT(31)
--#define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
--#define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
--#define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
--
--#define REG_EGRESS_TRTCM_CFG          0x1010
--#define EGRESS_TRTCM_EN_MASK          BIT(31)
--#define EGRESS_TRTCM_MODE_MASK                BIT(30)
--#define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
--#define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
--
--#define TRTCM_PARAM_RW_MASK           BIT(31)
--#define TRTCM_PARAM_RW_DONE_MASK      BIT(30)
--#define TRTCM_PARAM_TYPE_MASK         GENMASK(29, 28)
--#define TRTCM_METER_GROUP_MASK                GENMASK(27, 26)
--#define TRTCM_PARAM_INDEX_MASK                GENMASK(23, 17)
--#define TRTCM_PARAM_RATE_TYPE_MASK    BIT(16)
--
--#define REG_TRTCM_CFG_PARAM(_n)               ((_n) + 0x4)
--#define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
--#define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
--
--#define REG_TXWRR_MODE_CFG            0x1020
--#define TWRR_WEIGHT_SCALE_MASK                BIT(31)
--#define TWRR_WEIGHT_BASE_MASK         BIT(3)
--
--#define REG_TXWRR_WEIGHT_CFG          0x1024
--#define TWRR_RW_CMD_MASK              BIT(31)
--#define TWRR_RW_CMD_DONE              BIT(30)
--#define TWRR_CHAN_IDX_MASK            GENMASK(23, 19)
--#define TWRR_QUEUE_IDX_MASK           GENMASK(18, 16)
--#define TWRR_VALUE_MASK                       GENMASK(15, 0)
--
--#define REG_PSE_BUF_USAGE_CFG         0x1028
--#define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
--
--#define REG_CHAN_QOS_MODE(_n)         (0x1040 + ((_n) << 2))
--#define CHAN_QOS_MODE_MASK(_n)                GENMASK(2 + ((_n) << 2), (_n) << 2)
--
--#define REG_GLB_TRTCM_CFG             0x1080
--#define GLB_TRTCM_EN_MASK             BIT(31)
--#define GLB_TRTCM_MODE_MASK           BIT(30)
--#define GLB_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
--#define GLB_FAST_TICK_MASK            GENMASK(15, 0)
--
--#define REG_TXQ_CNGST_CFG             0x10a0
--#define TXQ_CNGST_DROP_EN             BIT(31)
--#define TXQ_CNGST_DEI_DROP_EN         BIT(30)
--
--#define REG_SLA_TRTCM_CFG             0x1150
--#define SLA_TRTCM_EN_MASK             BIT(31)
--#define SLA_TRTCM_MODE_MASK           BIT(30)
--#define SLA_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
--#define SLA_FAST_TICK_MASK            GENMASK(15, 0)
--
--/* CTRL */
--#define QDMA_DESC_DONE_MASK           BIT(31)
--#define QDMA_DESC_DROP_MASK           BIT(30) /* tx: drop - rx: overflow */
--#define QDMA_DESC_MORE_MASK           BIT(29) /* more SG elements */
--#define QDMA_DESC_DEI_MASK            BIT(25)
--#define QDMA_DESC_NO_DROP_MASK                BIT(24)
--#define QDMA_DESC_LEN_MASK            GENMASK(15, 0)
--/* DATA */
--#define QDMA_DESC_NEXT_ID_MASK                GENMASK(15, 0)
--/* TX MSG0 */
--#define QDMA_ETH_TXMSG_MIC_IDX_MASK   BIT(30)
--#define QDMA_ETH_TXMSG_SP_TAG_MASK    GENMASK(29, 14)
--#define QDMA_ETH_TXMSG_ICO_MASK               BIT(13)
--#define QDMA_ETH_TXMSG_UCO_MASK               BIT(12)
--#define QDMA_ETH_TXMSG_TCO_MASK               BIT(11)
--#define QDMA_ETH_TXMSG_TSO_MASK               BIT(10)
--#define QDMA_ETH_TXMSG_FAST_MASK      BIT(9)
--#define QDMA_ETH_TXMSG_OAM_MASK               BIT(8)
--#define QDMA_ETH_TXMSG_CHAN_MASK      GENMASK(7, 3)
--#define QDMA_ETH_TXMSG_QUEUE_MASK     GENMASK(2, 0)
--/* TX MSG1 */
--#define QDMA_ETH_TXMSG_NO_DROP                BIT(31)
--#define QDMA_ETH_TXMSG_METER_MASK     GENMASK(30, 24) /* 0x7f no meters */
--#define QDMA_ETH_TXMSG_FPORT_MASK     GENMASK(23, 20)
--#define QDMA_ETH_TXMSG_NBOQ_MASK      GENMASK(19, 15)
--#define QDMA_ETH_TXMSG_HWF_MASK               BIT(14)
--#define QDMA_ETH_TXMSG_HOP_MASK               BIT(13)
--#define QDMA_ETH_TXMSG_PTP_MASK               BIT(12)
--#define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
--#define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
--
--/* RX MSG1 */
--#define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
--#define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
--#define QDMA_ETH_RXMSG_IP4_MASK               BIT(29)
--#define QDMA_ETH_RXMSG_IP4F_MASK      BIT(28)
--#define QDMA_ETH_RXMSG_L4_VALID_MASK  BIT(27)
--#define QDMA_ETH_RXMSG_L4F_MASK               BIT(26)
--#define QDMA_ETH_RXMSG_SPORT_MASK     GENMASK(25, 21)
--#define QDMA_ETH_RXMSG_CRSN_MASK      GENMASK(20, 16)
--#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
--
--struct airoha_qdma_desc {
--      __le32 rsv;
--      __le32 ctrl;
--      __le32 addr;
--      __le32 data;
--      __le32 msg0;
--      __le32 msg1;
--      __le32 msg2;
--      __le32 msg3;
--};
--
--/* CTRL0 */
--#define QDMA_FWD_DESC_CTX_MASK                BIT(31)
--#define QDMA_FWD_DESC_RING_MASK               GENMASK(30, 28)
--#define QDMA_FWD_DESC_IDX_MASK                GENMASK(27, 16)
--#define QDMA_FWD_DESC_LEN_MASK                GENMASK(15, 0)
--/* CTRL1 */
--#define QDMA_FWD_DESC_FIRST_IDX_MASK  GENMASK(15, 0)
--/* CTRL2 */
--#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK       GENMASK(2, 0)
--
--struct airoha_qdma_fwd_desc {
--      __le32 addr;
--      __le32 ctrl0;
--      __le32 ctrl1;
--      __le32 ctrl2;
--      __le32 msg0;
--      __le32 msg1;
--      __le32 rsv0;
--      __le32 rsv1;
--};
--
- u32 airoha_rr(void __iomem *base, u32 offset)
- {
-       return readl(base + offset);
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -0,0 +1,670 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * Copyright (c) 2024 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#ifndef AIROHA_REGS_H
-+#define AIROHA_REGS_H
-+
-+#include <linux/types.h>
-+
-+/* FE */
-+#define PSE_BASE                      0x0100
-+#define CSR_IFC_BASE                  0x0200
-+#define CDM1_BASE                     0x0400
-+#define GDM1_BASE                     0x0500
-+#define PPE1_BASE                     0x0c00
-+
-+#define CDM2_BASE                     0x1400
-+#define GDM2_BASE                     0x1500
-+
-+#define GDM3_BASE                     0x1100
-+#define GDM4_BASE                     0x2500
-+
-+#define GDM_BASE(_n)                  \
-+      ((_n) == 4 ? GDM4_BASE :        \
-+       (_n) == 3 ? GDM3_BASE :        \
-+       (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-+
-+#define REG_FE_DMA_GLO_CFG            0x0000
-+#define FE_DMA_GLO_L2_SPACE_MASK      GENMASK(7, 4)
-+#define FE_DMA_GLO_PG_SZ_MASK         BIT(3)
-+
-+#define REG_FE_RST_GLO_CFG            0x0004
-+#define FE_RST_GDM4_MBI_ARB_MASK      BIT(3)
-+#define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
-+#define FE_RST_CORE_MASK              BIT(0)
-+
-+#define REG_FE_WAN_MAC_H              0x0030
-+#define REG_FE_LAN_MAC_H              0x0040
-+
-+#define REG_FE_MAC_LMIN(_n)           ((_n) + 0x04)
-+#define REG_FE_MAC_LMAX(_n)           ((_n) + 0x08)
-+
-+#define REG_FE_CDM1_OQ_MAP0           0x0050
-+#define REG_FE_CDM1_OQ_MAP1           0x0054
-+#define REG_FE_CDM1_OQ_MAP2           0x0058
-+#define REG_FE_CDM1_OQ_MAP3           0x005c
-+
-+#define REG_FE_PCE_CFG                        0x0070
-+#define PCE_DPI_EN_MASK                       BIT(2)
-+#define PCE_KA_EN_MASK                        BIT(1)
-+#define PCE_MC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_WR               0x0080
-+#define PSE_CFG_PORT_ID_MASK          GENMASK(27, 24)
-+#define PSE_CFG_QUEUE_ID_MASK         GENMASK(20, 16)
-+#define PSE_CFG_WR_EN_MASK            BIT(8)
-+#define PSE_CFG_OQRSV_SEL_MASK                BIT(0)
-+
-+#define REG_FE_PSE_QUEUE_CFG_VAL      0x0084
-+#define PSE_CFG_OQ_RSV_MASK           GENMASK(13, 0)
-+
-+#define PSE_FQ_CFG                    0x008c
-+#define PSE_FQ_LIMIT_MASK             GENMASK(14, 0)
-+
-+#define REG_FE_PSE_BUF_SET            0x0090
-+#define PSE_SHARE_USED_LTHD_MASK      GENMASK(31, 16)
-+#define PSE_ALLRSV_MASK                       GENMASK(14, 0)
-+
-+#define REG_PSE_SHARE_USED_THD                0x0094
-+#define PSE_SHARE_USED_MTHD_MASK      GENMASK(31, 16)
-+#define PSE_SHARE_USED_HTHD_MASK      GENMASK(15, 0)
-+
-+#define REG_GDM_MISC_CFG              0x0148
-+#define GDM2_RDM_ACK_WAIT_PREF_MASK   BIT(9)
-+#define GDM2_CHN_VLD_MODE_MASK                BIT(5)
-+
-+#define REG_FE_CSR_IFC_CFG            CSR_IFC_BASE
-+#define FE_IFC_EN_MASK                        BIT(0)
-+
-+#define REG_FE_VIP_PORT_EN            0x01f0
-+#define REG_FE_IFC_PORT_EN            0x01f4
-+
-+#define REG_PSE_IQ_REV1                       (PSE_BASE + 0x08)
-+#define PSE_IQ_RES1_P2_MASK           GENMASK(23, 16)
-+
-+#define REG_PSE_IQ_REV2                       (PSE_BASE + 0x0c)
-+#define PSE_IQ_RES2_P5_MASK           GENMASK(15, 8)
-+#define PSE_IQ_RES2_P4_MASK           GENMASK(7, 0)
-+
-+#define REG_FE_VIP_EN(_n)             (0x0300 + ((_n) << 3))
-+#define PATN_FCPU_EN_MASK             BIT(7)
-+#define PATN_SWP_EN_MASK              BIT(6)
-+#define PATN_DP_EN_MASK                       BIT(5)
-+#define PATN_SP_EN_MASK                       BIT(4)
-+#define PATN_TYPE_MASK                        GENMASK(3, 1)
-+#define PATN_EN_MASK                  BIT(0)
-+
-+#define REG_FE_VIP_PATN(_n)           (0x0304 + ((_n) << 3))
-+#define PATN_DP_MASK                  GENMASK(31, 16)
-+#define PATN_SP_MASK                  GENMASK(15, 0)
-+
-+#define REG_CDM1_VLAN_CTRL            CDM1_BASE
-+#define CDM1_VLAN_MASK                        GENMASK(31, 16)
-+
-+#define REG_CDM1_FWD_CFG              (CDM1_BASE + 0x08)
-+#define CDM1_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM1_CRSN_QSEL(_n)                (CDM1_BASE + 0x10 + ((_n) << 2))
-+#define CDM1_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_CDM2_FWD_CFG              (CDM2_BASE + 0x08)
-+#define CDM2_OAM_QSEL_MASK            GENMASK(31, 27)
-+#define CDM2_VIP_QSEL_MASK            GENMASK(24, 20)
-+
-+#define REG_CDM2_CRSN_QSEL(_n)                (CDM2_BASE + 0x10 + ((_n) << 2))
-+#define CDM2_CRSN_QSEL_REASON_MASK(_n)        \
-+      GENMASK(4 + (((_n) % 4) << 3),  (((_n) % 4) << 3))
-+
-+#define REG_GDM_FWD_CFG(_n)           GDM_BASE(_n)
-+#define GDM_DROP_CRC_ERR              BIT(23)
-+#define GDM_IP4_CKSUM                 BIT(22)
-+#define GDM_TCP_CKSUM                 BIT(21)
-+#define GDM_UDP_CKSUM                 BIT(20)
-+#define GDM_UCFQ_MASK                 GENMASK(15, 12)
-+#define GDM_BCFQ_MASK                 GENMASK(11, 8)
-+#define GDM_MCFQ_MASK                 GENMASK(7, 4)
-+#define GDM_OCFQ_MASK                 GENMASK(3, 0)
-+
-+#define REG_GDM_INGRESS_CFG(_n)               (GDM_BASE(_n) + 0x10)
-+#define GDM_INGRESS_FC_EN_MASK                BIT(1)
-+#define GDM_STAG_EN_MASK              BIT(0)
-+
-+#define REG_GDM_LEN_CFG(_n)           (GDM_BASE(_n) + 0x14)
-+#define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
-+#define GDM_LONG_LEN_MASK             GENMASK(29, 16)
-+
-+#define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
-+#define FE_CPORT_PAD                  BIT(26)
-+#define FE_CPORT_PORT_XFC_MASK                BIT(25)
-+#define FE_CPORT_QUEUE_XFC_MASK               BIT(24)
-+
-+#define REG_FE_GDM_MIB_CLEAR(_n)      (GDM_BASE(_n) + 0xf0)
-+#define FE_GDM_MIB_RX_CLEAR_MASK      BIT(1)
-+#define FE_GDM_MIB_TX_CLEAR_MASK      BIT(0)
-+
-+#define REG_FE_GDM1_MIB_CFG           (GDM1_BASE + 0xf4)
-+#define FE_STRICT_RFC2819_MODE_MASK   BIT(31)
-+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK  BIT(17)
-+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK  BIT(16)
-+#define FE_TX_MIB_ID_MASK             GENMASK(15, 8)
-+#define FE_RX_MIB_ID_MASK             GENMASK(7, 0)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x104)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x10c)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x110)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x114)
-+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x118)
-+#define REG_FE_GDM_TX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x11c)
-+#define REG_FE_GDM_TX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x120)
-+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x124)
-+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x128)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x12c)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x130)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x134)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x138)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x13c)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x140)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)                (GDM_BASE(_n) + 0x148)
-+#define REG_FE_GDM_RX_FC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x14c)
-+#define REG_FE_GDM_RX_RC_DROP_CNT(_n)         (GDM_BASE(_n) + 0x150)
-+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)   (GDM_BASE(_n) + 0x154)
-+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)      (GDM_BASE(_n) + 0x158)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)               (GDM_BASE(_n) + 0x15c)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)               (GDM_BASE(_n) + 0x160)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)      (GDM_BASE(_n) + 0x164)
-+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)                (GDM_BASE(_n) + 0x168)
-+#define REG_FE_GDM_RX_ETH_BC_CNT(_n)          (GDM_BASE(_n) + 0x16c)
-+#define REG_FE_GDM_RX_ETH_MC_CNT(_n)          (GDM_BASE(_n) + 0x170)
-+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)     (GDM_BASE(_n) + 0x174)
-+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)                (GDM_BASE(_n) + 0x178)
-+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)      (GDM_BASE(_n) + 0x17c)
-+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)                (GDM_BASE(_n) + 0x180)
-+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)                (GDM_BASE(_n) + 0x184)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)               (GDM_BASE(_n) + 0x188)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)               (GDM_BASE(_n) + 0x18c)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)      (GDM_BASE(_n) + 0x190)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)      (GDM_BASE(_n) + 0x194)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
-+
-+#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
-+#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
-+#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
-+#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
-+#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
-+
-+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
-+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
-+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
-+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x28c)
-+
-+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x290)
-+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x294)
-+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x298)
-+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)      (GDM_BASE(_n) + 0x29c)
-+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2b8)
-+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2bc)
-+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2c0)
-+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2c4)
-+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2c8)
-+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2cc)
-+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)               (GDM_BASE(_n) + 0x2e8)
-+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)               (GDM_BASE(_n) + 0x2ec)
-+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)      (GDM_BASE(_n) + 0x2f0)
-+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)      (GDM_BASE(_n) + 0x2f4)
-+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)      (GDM_BASE(_n) + 0x2f8)
-+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)     (GDM_BASE(_n) + 0x2fc)
-+
-+#define REG_GDM2_CHN_RLS              (GDM2_BASE + 0x20)
-+#define MBI_RX_AGE_SEL_MASK           GENMASK(26, 25)
-+#define MBI_TX_AGE_SEL_MASK           GENMASK(18, 17)
-+
-+#define REG_GDM3_FWD_CFG              GDM3_BASE
-+#define GDM3_PAD_EN_MASK              BIT(28)
-+
-+#define REG_GDM4_FWD_CFG              GDM4_BASE
-+#define GDM4_PAD_EN_MASK              BIT(28)
-+#define GDM4_SPORT_OFFSET0_MASK               GENMASK(11, 8)
-+
-+#define REG_GDM4_SRC_PORT_SET         (GDM4_BASE + 0x23c)
-+#define GDM4_SPORT_OFF2_MASK          GENMASK(19, 16)
-+#define GDM4_SPORT_OFF1_MASK          GENMASK(15, 12)
-+#define GDM4_SPORT_OFF0_MASK          GENMASK(11, 8)
-+
-+#define REG_IP_FRAG_FP                        0x2010
-+#define IP_ASSEMBLE_PORT_MASK         GENMASK(24, 21)
-+#define IP_ASSEMBLE_NBQ_MASK          GENMASK(20, 16)
-+#define IP_FRAGMENT_PORT_MASK         GENMASK(8, 5)
-+#define IP_FRAGMENT_NBQ_MASK          GENMASK(4, 0)
-+
-+#define REG_MC_VLAN_EN                        0x2100
-+#define MC_VLAN_EN_MASK                       BIT(0)
-+
-+#define REG_MC_VLAN_CFG                       0x2104
-+#define MC_VLAN_CFG_CMD_DONE_MASK     BIT(31)
-+#define MC_VLAN_CFG_TABLE_ID_MASK     GENMASK(21, 16)
-+#define MC_VLAN_CFG_PORT_ID_MASK      GENMASK(11, 8)
-+#define MC_VLAN_CFG_TABLE_SEL_MASK    BIT(4)
-+#define MC_VLAN_CFG_RW_MASK           BIT(0)
-+
-+#define REG_MC_VLAN_DATA              0x2108
-+
-+#define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
-+
-+/* QDMA */
-+#define REG_QDMA_GLOBAL_CFG                   0x0004
-+#define GLOBAL_CFG_RX_2B_OFFSET_MASK          BIT(31)
-+#define GLOBAL_CFG_DMA_PREFERENCE_MASK                GENMASK(30, 29)
-+#define GLOBAL_CFG_CPU_TXR_RR_MASK            BIT(28)
-+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK                BIT(27)
-+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK     BIT(26)
-+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK   BIT(25)
-+#define GLOBAL_CFG_OAM_MODIFY_MASK            BIT(24)
-+#define GLOBAL_CFG_RESET_MASK                 BIT(23)
-+#define GLOBAL_CFG_RESET_DONE_MASK            BIT(22)
-+#define GLOBAL_CFG_MULTICAST_EN_MASK          BIT(21)
-+#define GLOBAL_CFG_IRQ1_EN_MASK                       BIT(20)
-+#define GLOBAL_CFG_IRQ0_EN_MASK                       BIT(19)
-+#define GLOBAL_CFG_LOOPCNT_EN_MASK            BIT(18)
-+#define GLOBAL_CFG_RD_BYPASS_WR_MASK          BIT(17)
-+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK         BIT(16)
-+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK          GENMASK(13, 8)
-+#define GLOBAL_CFG_CHECK_DONE_MASK            BIT(7)
-+#define GLOBAL_CFG_TX_WB_DONE_MASK            BIT(6)
-+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK         GENMASK(5, 4)
-+#define GLOBAL_CFG_RX_DMA_BUSY_MASK           BIT(3)
-+#define GLOBAL_CFG_RX_DMA_EN_MASK             BIT(2)
-+#define GLOBAL_CFG_TX_DMA_BUSY_MASK           BIT(1)
-+#define GLOBAL_CFG_TX_DMA_EN_MASK             BIT(0)
-+
-+#define REG_FWD_DSCP_BASE                     0x0010
-+#define REG_FWD_BUF_BASE                      0x0014
-+
-+#define REG_HW_FWD_DSCP_CFG                   0x0018
-+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK         GENMASK(29, 28)
-+#define HW_FWD_DSCP_SCATTER_LEN_MASK          GENMASK(17, 16)
-+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK      GENMASK(15, 0)
-+
-+#define REG_INT_STATUS(_n)            \
-+      (((_n) == 4) ? 0x0730 :         \
-+       ((_n) == 3) ? 0x0724 :         \
-+       ((_n) == 2) ? 0x0720 :         \
-+       ((_n) == 1) ? 0x0024 : 0x0020)
-+
-+#define REG_INT_ENABLE(_n)            \
-+      (((_n) == 4) ? 0x0750 :         \
-+       ((_n) == 3) ? 0x0744 :         \
-+       ((_n) == 2) ? 0x0740 :         \
-+       ((_n) == 1) ? 0x002c : 0x0028)
-+
-+/* QDMA_CSR_INT_ENABLE1 */
-+#define RX15_COHERENT_INT_MASK                BIT(31)
-+#define RX14_COHERENT_INT_MASK                BIT(30)
-+#define RX13_COHERENT_INT_MASK                BIT(29)
-+#define RX12_COHERENT_INT_MASK                BIT(28)
-+#define RX11_COHERENT_INT_MASK                BIT(27)
-+#define RX10_COHERENT_INT_MASK                BIT(26)
-+#define RX9_COHERENT_INT_MASK         BIT(25)
-+#define RX8_COHERENT_INT_MASK         BIT(24)
-+#define RX7_COHERENT_INT_MASK         BIT(23)
-+#define RX6_COHERENT_INT_MASK         BIT(22)
-+#define RX5_COHERENT_INT_MASK         BIT(21)
-+#define RX4_COHERENT_INT_MASK         BIT(20)
-+#define RX3_COHERENT_INT_MASK         BIT(19)
-+#define RX2_COHERENT_INT_MASK         BIT(18)
-+#define RX1_COHERENT_INT_MASK         BIT(17)
-+#define RX0_COHERENT_INT_MASK         BIT(16)
-+#define TX7_COHERENT_INT_MASK         BIT(15)
-+#define TX6_COHERENT_INT_MASK         BIT(14)
-+#define TX5_COHERENT_INT_MASK         BIT(13)
-+#define TX4_COHERENT_INT_MASK         BIT(12)
-+#define TX3_COHERENT_INT_MASK         BIT(11)
-+#define TX2_COHERENT_INT_MASK         BIT(10)
-+#define TX1_COHERENT_INT_MASK         BIT(9)
-+#define TX0_COHERENT_INT_MASK         BIT(8)
-+#define CNT_OVER_FLOW_INT_MASK                BIT(7)
-+#define IRQ1_FULL_INT_MASK            BIT(5)
-+#define IRQ1_INT_MASK                 BIT(4)
-+#define HWFWD_DSCP_LOW_INT_MASK               BIT(3)
-+#define HWFWD_DSCP_EMPTY_INT_MASK     BIT(2)
-+#define IRQ0_FULL_INT_MASK            BIT(1)
-+#define IRQ0_INT_MASK                 BIT(0)
-+
-+#define TX_DONE_INT_MASK(_n)                                  \
-+      ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
-+            : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_TX_MASK                                           \
-+      (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
-+       IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-+
-+#define INT_IDX0_MASK                                         \
-+      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
-+       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
-+       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
-+       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
-+       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
-+       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
-+       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
-+       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
-+       RX15_COHERENT_INT_MASK | INT_TX_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE2 */
-+#define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
-+#define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
-+#define RX13_NO_CPU_DSCP_INT_MASK     BIT(29)
-+#define RX12_NO_CPU_DSCP_INT_MASK     BIT(28)
-+#define RX11_NO_CPU_DSCP_INT_MASK     BIT(27)
-+#define RX10_NO_CPU_DSCP_INT_MASK     BIT(26)
-+#define RX9_NO_CPU_DSCP_INT_MASK      BIT(25)
-+#define RX8_NO_CPU_DSCP_INT_MASK      BIT(24)
-+#define RX7_NO_CPU_DSCP_INT_MASK      BIT(23)
-+#define RX6_NO_CPU_DSCP_INT_MASK      BIT(22)
-+#define RX5_NO_CPU_DSCP_INT_MASK      BIT(21)
-+#define RX4_NO_CPU_DSCP_INT_MASK      BIT(20)
-+#define RX3_NO_CPU_DSCP_INT_MASK      BIT(19)
-+#define RX2_NO_CPU_DSCP_INT_MASK      BIT(18)
-+#define RX1_NO_CPU_DSCP_INT_MASK      BIT(17)
-+#define RX0_NO_CPU_DSCP_INT_MASK      BIT(16)
-+#define RX15_DONE_INT_MASK            BIT(15)
-+#define RX14_DONE_INT_MASK            BIT(14)
-+#define RX13_DONE_INT_MASK            BIT(13)
-+#define RX12_DONE_INT_MASK            BIT(12)
-+#define RX11_DONE_INT_MASK            BIT(11)
-+#define RX10_DONE_INT_MASK            BIT(10)
-+#define RX9_DONE_INT_MASK             BIT(9)
-+#define RX8_DONE_INT_MASK             BIT(8)
-+#define RX7_DONE_INT_MASK             BIT(7)
-+#define RX6_DONE_INT_MASK             BIT(6)
-+#define RX5_DONE_INT_MASK             BIT(5)
-+#define RX4_DONE_INT_MASK             BIT(4)
-+#define RX3_DONE_INT_MASK             BIT(3)
-+#define RX2_DONE_INT_MASK             BIT(2)
-+#define RX1_DONE_INT_MASK             BIT(1)
-+#define RX0_DONE_INT_MASK             BIT(0)
-+
-+#define RX_DONE_INT_MASK                                      \
-+      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
-+       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
-+       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
-+       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
-+       RX15_DONE_INT_MASK)
-+#define INT_IDX1_MASK                                         \
-+      (RX_DONE_INT_MASK |                                     \
-+       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
-+       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
-+       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
-+       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
-+       RX15_NO_CPU_DSCP_INT_MASK)
-+
-+/* QDMA_CSR_INT_ENABLE5 */
-+#define TX31_COHERENT_INT_MASK                BIT(31)
-+#define TX30_COHERENT_INT_MASK                BIT(30)
-+#define TX29_COHERENT_INT_MASK                BIT(29)
-+#define TX28_COHERENT_INT_MASK                BIT(28)
-+#define TX27_COHERENT_INT_MASK                BIT(27)
-+#define TX26_COHERENT_INT_MASK                BIT(26)
-+#define TX25_COHERENT_INT_MASK                BIT(25)
-+#define TX24_COHERENT_INT_MASK                BIT(24)
-+#define TX23_COHERENT_INT_MASK                BIT(23)
-+#define TX22_COHERENT_INT_MASK                BIT(22)
-+#define TX21_COHERENT_INT_MASK                BIT(21)
-+#define TX20_COHERENT_INT_MASK                BIT(20)
-+#define TX19_COHERENT_INT_MASK                BIT(19)
-+#define TX18_COHERENT_INT_MASK                BIT(18)
-+#define TX17_COHERENT_INT_MASK                BIT(17)
-+#define TX16_COHERENT_INT_MASK                BIT(16)
-+#define TX15_COHERENT_INT_MASK                BIT(15)
-+#define TX14_COHERENT_INT_MASK                BIT(14)
-+#define TX13_COHERENT_INT_MASK                BIT(13)
-+#define TX12_COHERENT_INT_MASK                BIT(12)
-+#define TX11_COHERENT_INT_MASK                BIT(11)
-+#define TX10_COHERENT_INT_MASK                BIT(10)
-+#define TX9_COHERENT_INT_MASK         BIT(9)
-+#define TX8_COHERENT_INT_MASK         BIT(8)
-+
-+#define INT_IDX4_MASK                                         \
-+      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
-+       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
-+       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
-+       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
-+       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
-+       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
-+       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
-+       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
-+       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
-+       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
-+       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
-+       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-+
-+#define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
-+
-+#define REG_TX_IRQ_CFG(_n)            ((_n) ? 0x004c : 0x0054)
-+#define TX_IRQ_THR_MASK                       GENMASK(27, 16)
-+#define TX_IRQ_DEPTH_MASK             GENMASK(11, 0)
-+
-+#define REG_IRQ_CLEAR_LEN(_n)         ((_n) ? 0x0064 : 0x0058)
-+#define IRQ_CLEAR_LEN_MASK            GENMASK(7, 0)
-+
-+#define REG_IRQ_STATUS(_n)            ((_n) ? 0x0068 : 0x005c)
-+#define IRQ_ENTRY_LEN_MASK            GENMASK(27, 16)
-+#define IRQ_HEAD_IDX_MASK             GENMASK(11, 0)
-+
-+#define REG_TX_RING_BASE(_n)  \
-+      (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-+
-+#define REG_TX_RING_BLOCKING(_n)      \
-+      (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-+
-+#define TX_RING_IRQ_BLOCKING_MAP_MASK                 BIT(6)
-+#define TX_RING_IRQ_BLOCKING_CFG_MASK                 BIT(4)
-+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK          BIT(2)
-+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK    BIT(1)
-+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK    BIT(0)
-+
-+#define REG_TX_CPU_IDX(_n)    \
-+      (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-+
-+#define TX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_TX_DMA_IDX(_n)    \
-+      (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-+
-+#define TX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define IRQ_RING_IDX_MASK             GENMASK(20, 16)
-+#define IRQ_DESC_IDX_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_RING_BASE(_n)  \
-+      (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-+
-+#define REG_RX_RING_SIZE(_n)  \
-+      (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-+
-+#define RX_RING_THR_MASK              GENMASK(31, 16)
-+#define RX_RING_SIZE_MASK             GENMASK(15, 0)
-+
-+#define REG_RX_CPU_IDX(_n)    \
-+      (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-+
-+#define RX_RING_CPU_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_RX_DMA_IDX(_n)    \
-+      (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-+
-+#define REG_RX_DELAY_INT_IDX(_n)      \
-+      (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-+
-+#define RX_DELAY_INT_MASK             GENMASK(15, 0)
-+
-+#define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+
-+#define REG_INGRESS_TRTCM_CFG         0x0070
-+#define INGRESS_TRTCM_EN_MASK         BIT(31)
-+#define INGRESS_TRTCM_MODE_MASK               BIT(30)
-+#define INGRESS_SLOW_TICK_RATIO_MASK  GENMASK(29, 16)
-+#define INGRESS_FAST_TICK_MASK                GENMASK(15, 0)
-+
-+#define REG_QUEUE_CLOSE_CFG(_n)               (0x00a0 + ((_n) & 0xfc))
-+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)   BIT((_m) + (((_n) & 0x3) << 3))
-+
-+#define REG_TXQ_DIS_CFG_BASE(_n)      ((_n) ? 0x20a0 : 0x00a0)
-+#define REG_TXQ_DIS_CFG(_n, _m)               (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-+
-+#define REG_CNTR_CFG(_n)              (0x0400 + ((_n) << 3))
-+#define CNTR_EN_MASK                  BIT(31)
-+#define CNTR_ALL_CHAN_EN_MASK         BIT(30)
-+#define CNTR_ALL_QUEUE_EN_MASK                BIT(29)
-+#define CNTR_ALL_DSCP_RING_EN_MASK    BIT(28)
-+#define CNTR_SRC_MASK                 GENMASK(27, 24)
-+#define CNTR_DSCP_RING_MASK           GENMASK(20, 16)
-+#define CNTR_CHAN_MASK                        GENMASK(7, 3)
-+#define CNTR_QUEUE_MASK                       GENMASK(2, 0)
-+
-+#define REG_CNTR_VAL(_n)              (0x0404 + ((_n) << 3))
-+
-+#define REG_LMGR_INIT_CFG             0x1000
-+#define LMGR_INIT_START                       BIT(31)
-+#define LMGR_SRAM_MODE_MASK           BIT(30)
-+#define HW_FWD_PKTSIZE_OVERHEAD_MASK  GENMASK(27, 20)
-+#define HW_FWD_DESC_NUM_MASK          GENMASK(16, 0)
-+
-+#define REG_FWD_DSCP_LOW_THR          0x1004
-+#define FWD_DSCP_LOW_THR_MASK         GENMASK(17, 0)
-+
-+#define REG_EGRESS_RATE_METER_CFG             0x100c
-+#define EGRESS_RATE_METER_EN_MASK             BIT(31)
-+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK     BIT(17)
-+#define EGRESS_RATE_METER_WINDOW_SZ_MASK      GENMASK(16, 12)
-+#define EGRESS_RATE_METER_TIMESLICE_MASK      GENMASK(10, 0)
-+
-+#define REG_EGRESS_TRTCM_CFG          0x1010
-+#define EGRESS_TRTCM_EN_MASK          BIT(31)
-+#define EGRESS_TRTCM_MODE_MASK                BIT(30)
-+#define EGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
-+#define EGRESS_FAST_TICK_MASK         GENMASK(15, 0)
-+
-+#define TRTCM_PARAM_RW_MASK           BIT(31)
-+#define TRTCM_PARAM_RW_DONE_MASK      BIT(30)
-+#define TRTCM_PARAM_TYPE_MASK         GENMASK(29, 28)
-+#define TRTCM_METER_GROUP_MASK                GENMASK(27, 26)
-+#define TRTCM_PARAM_INDEX_MASK                GENMASK(23, 17)
-+#define TRTCM_PARAM_RATE_TYPE_MASK    BIT(16)
-+
-+#define REG_TRTCM_CFG_PARAM(_n)               ((_n) + 0x4)
-+#define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
-+#define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
-+
-+#define REG_TXWRR_MODE_CFG            0x1020
-+#define TWRR_WEIGHT_SCALE_MASK                BIT(31)
-+#define TWRR_WEIGHT_BASE_MASK         BIT(3)
-+
-+#define REG_TXWRR_WEIGHT_CFG          0x1024
-+#define TWRR_RW_CMD_MASK              BIT(31)
-+#define TWRR_RW_CMD_DONE              BIT(30)
-+#define TWRR_CHAN_IDX_MASK            GENMASK(23, 19)
-+#define TWRR_QUEUE_IDX_MASK           GENMASK(18, 16)
-+#define TWRR_VALUE_MASK                       GENMASK(15, 0)
-+
-+#define REG_PSE_BUF_USAGE_CFG         0x1028
-+#define PSE_BUF_ESTIMATE_EN_MASK      BIT(29)
-+
-+#define REG_CHAN_QOS_MODE(_n)         (0x1040 + ((_n) << 2))
-+#define CHAN_QOS_MODE_MASK(_n)                GENMASK(2 + ((_n) << 2), (_n) << 2)
-+
-+#define REG_GLB_TRTCM_CFG             0x1080
-+#define GLB_TRTCM_EN_MASK             BIT(31)
-+#define GLB_TRTCM_MODE_MASK           BIT(30)
-+#define GLB_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define GLB_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+#define REG_TXQ_CNGST_CFG             0x10a0
-+#define TXQ_CNGST_DROP_EN             BIT(31)
-+#define TXQ_CNGST_DEI_DROP_EN         BIT(30)
-+
-+#define REG_SLA_TRTCM_CFG             0x1150
-+#define SLA_TRTCM_EN_MASK             BIT(31)
-+#define SLA_TRTCM_MODE_MASK           BIT(30)
-+#define SLA_SLOW_TICK_RATIO_MASK      GENMASK(29, 16)
-+#define SLA_FAST_TICK_MASK            GENMASK(15, 0)
-+
-+/* CTRL */
-+#define QDMA_DESC_DONE_MASK           BIT(31)
-+#define QDMA_DESC_DROP_MASK           BIT(30) /* tx: drop - rx: overflow */
-+#define QDMA_DESC_MORE_MASK           BIT(29) /* more SG elements */
-+#define QDMA_DESC_DEI_MASK            BIT(25)
-+#define QDMA_DESC_NO_DROP_MASK                BIT(24)
-+#define QDMA_DESC_LEN_MASK            GENMASK(15, 0)
-+/* DATA */
-+#define QDMA_DESC_NEXT_ID_MASK                GENMASK(15, 0)
-+/* TX MSG0 */
-+#define QDMA_ETH_TXMSG_MIC_IDX_MASK   BIT(30)
-+#define QDMA_ETH_TXMSG_SP_TAG_MASK    GENMASK(29, 14)
-+#define QDMA_ETH_TXMSG_ICO_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_UCO_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_TCO_MASK               BIT(11)
-+#define QDMA_ETH_TXMSG_TSO_MASK               BIT(10)
-+#define QDMA_ETH_TXMSG_FAST_MASK      BIT(9)
-+#define QDMA_ETH_TXMSG_OAM_MASK               BIT(8)
-+#define QDMA_ETH_TXMSG_CHAN_MASK      GENMASK(7, 3)
-+#define QDMA_ETH_TXMSG_QUEUE_MASK     GENMASK(2, 0)
-+/* TX MSG1 */
-+#define QDMA_ETH_TXMSG_NO_DROP                BIT(31)
-+#define QDMA_ETH_TXMSG_METER_MASK     GENMASK(30, 24) /* 0x7f no meters */
-+#define QDMA_ETH_TXMSG_FPORT_MASK     GENMASK(23, 20)
-+#define QDMA_ETH_TXMSG_NBOQ_MASK      GENMASK(19, 15)
-+#define QDMA_ETH_TXMSG_HWF_MASK               BIT(14)
-+#define QDMA_ETH_TXMSG_HOP_MASK               BIT(13)
-+#define QDMA_ETH_TXMSG_PTP_MASK               BIT(12)
-+#define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
-+#define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
-+
-+/* RX MSG1 */
-+#define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
-+#define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
-+#define QDMA_ETH_RXMSG_IP4_MASK               BIT(29)
-+#define QDMA_ETH_RXMSG_IP4F_MASK      BIT(28)
-+#define QDMA_ETH_RXMSG_L4_VALID_MASK  BIT(27)
-+#define QDMA_ETH_RXMSG_L4F_MASK               BIT(26)
-+#define QDMA_ETH_RXMSG_SPORT_MASK     GENMASK(25, 21)
-+#define QDMA_ETH_RXMSG_CRSN_MASK      GENMASK(20, 16)
-+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-+
-+struct airoha_qdma_desc {
-+      __le32 rsv;
-+      __le32 ctrl;
-+      __le32 addr;
-+      __le32 data;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 msg2;
-+      __le32 msg3;
-+};
-+
-+/* CTRL0 */
-+#define QDMA_FWD_DESC_CTX_MASK                BIT(31)
-+#define QDMA_FWD_DESC_RING_MASK               GENMASK(30, 28)
-+#define QDMA_FWD_DESC_IDX_MASK                GENMASK(27, 16)
-+#define QDMA_FWD_DESC_LEN_MASK                GENMASK(15, 0)
-+/* CTRL1 */
-+#define QDMA_FWD_DESC_FIRST_IDX_MASK  GENMASK(15, 0)
-+/* CTRL2 */
-+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK       GENMASK(2, 0)
-+
-+struct airoha_qdma_fwd_desc {
-+      __le32 addr;
-+      __le32 ctrl0;
-+      __le32 ctrl1;
-+      __le32 ctrl2;
-+      __le32 msg0;
-+      __le32 msg1;
-+      __le32 rsv0;
-+      __le32 rsv1;
-+};
-+
-+#endif /* AIROHA_REGS_H */
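As a brief aside, the register header above is consumed through the <linux/bitfield.h> helpers rather than open-coded shifts. A minimal sketch follows, assuming the driver's airoha_qdma_set()/airoha_qdma_rr() register helpers exist; the function name and the chosen bits are illustrative only, not a required setup.

#include <linux/bitfield.h>

/* Illustrative sketch: compose and parse REG_QDMA_GLOBAL_CFG using the
 * masks defined above. airoha_qdma_set()/airoha_qdma_rr() are assumed
 * register helpers; the enabled bits are examples only.
 */
static u32 example_qdma_global_cfg(struct airoha_qdma *qdma)
{
	u32 val;

	/* Enable TX/RX DMA and the 2-byte RX offset in one write. */
	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			FIELD_PREP(GLOBAL_CFG_TX_DMA_EN_MASK, 1) |
			FIELD_PREP(GLOBAL_CFG_RX_DMA_EN_MASK, 1) |
			FIELD_PREP(GLOBAL_CFG_RX_2B_OFFSET_MASK, 1));

	/* Read back and extract a multi-bit field with FIELD_GET(). */
	val = airoha_qdma_rr(qdma, REG_QDMA_GLOBAL_CFG);
	return FIELD_GET(GLOBAL_CFG_LPBK_RXQ_SEL_MASK, val);
}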
diff --git a/target/linux/airoha/patches-6.6/048-05-v6.15-net-airoha-Move-DSA-tag-in-DMA-descriptor.patch b/target/linux/airoha/patches-6.6/048-05-v6.15-net-airoha-Move-DSA-tag-in-DMA-descriptor.patch
deleted file mode 100644 (file)
index 61e889f..0000000
+++ /dev/null
@@ -1,287 +0,0 @@
-From af3cf757d5c99011b9b94ea8d78aeaccc0153fdc Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:13 +0100
-Subject: [PATCH 05/15] net: airoha: Move DSA tag in DMA descriptor
-
-The Packet Processor Engine (PPE) module reads DSA tags from the DMA
-descriptor and requires untagged DSA packets to parse them properly.
-Move the DSA tag into the DMA descriptor on the TX side and read it
-back from the DMA descriptor on the RX side. In order to avoid skb
-reallocation, store the tag in skb_dst on the RX side.
-This is a preliminary patch to enable netfilter flowtable hw offloading
-on EN7581 SoC.
-
-Tested-by: Sayantan Nandy <sayantan.nandy@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 125 ++++++++++++++++++++--
- drivers/net/ethernet/airoha/airoha_eth.h  |   7 ++
- drivers/net/ethernet/airoha/airoha_regs.h |   2 +
- 3 files changed, 128 insertions(+), 6 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -9,6 +9,7 @@
- #include <linux/tcp.h>
- #include <linux/u64_stats_sync.h>
- #include <net/dsa.h>
-+#include <net/dst_metadata.h>
- #include <net/page_pool/helpers.h>
- #include <net/pkt_cls.h>
- #include <uapi/linux/ppp_defs.h>
-@@ -656,6 +657,7 @@ static int airoha_qdma_rx_process(struct
-               struct airoha_qdma_desc *desc = &q->desc[q->tail];
-               dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-               u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-+              struct airoha_gdm_port *port;
-               struct sk_buff *skb;
-               int len, p;
-@@ -683,6 +685,7 @@ static int airoha_qdma_rx_process(struct
-                       continue;
-               }
-+              port = eth->ports[p];
-               skb = napi_build_skb(e->buf, q->buf_size);
-               if (!skb) {
-                       page_pool_put_full_page(q->page_pool,
-@@ -694,10 +697,26 @@ static int airoha_qdma_rx_process(struct
-               skb_reserve(skb, 2);
-               __skb_put(skb, len);
-               skb_mark_for_recycle(skb);
--              skb->dev = eth->ports[p]->dev;
-+              skb->dev = port->dev;
-               skb->protocol = eth_type_trans(skb, skb->dev);
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-               skb_record_rx_queue(skb, qid);
-+
-+              if (netdev_uses_dsa(port->dev)) {
-+                      /* PPE module requires untagged packets to work
-+                       * properly and it provides DSA port index via the
-+                       * DMA descriptor. Report DSA tag to the DSA stack
-+                       * via skb dst info.
-+                       */
-+                      u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
-+                                            le32_to_cpu(desc->msg0));
-+
-+                      if (sptag < ARRAY_SIZE(port->dsa_meta) &&
-+                          port->dsa_meta[sptag])
-+                              skb_dst_set_noref(skb,
-+                                                &port->dsa_meta[sptag]->dst);
-+              }
-+
-               napi_gro_receive(&q->napi, skb);
-               done++;
-@@ -1637,25 +1656,76 @@ static u16 airoha_dev_select_queue(struc
-       return queue < dev->num_tx_queues ? queue : 0;
- }
-+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
-+{
-+#if IS_ENABLED(CONFIG_NET_DSA)
-+      struct ethhdr *ehdr;
-+      struct dsa_port *dp;
-+      u8 xmit_tpid;
-+      u16 tag;
-+
-+      if (!netdev_uses_dsa(dev))
-+              return 0;
-+
-+      dp = dev->dsa_ptr;
-+      if (IS_ERR(dp))
-+              return 0;
-+
-+      if (dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
-+              return 0;
-+
-+      if (skb_cow_head(skb, 0))
-+              return 0;
-+
-+      ehdr = (struct ethhdr *)skb->data;
-+      tag = be16_to_cpu(ehdr->h_proto);
-+      xmit_tpid = tag >> 8;
-+
-+      switch (xmit_tpid) {
-+      case MTK_HDR_XMIT_TAGGED_TPID_8100:
-+              ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
-+              tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
-+              break;
-+      case MTK_HDR_XMIT_TAGGED_TPID_88A8:
-+              ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
-+              tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
-+              break;
-+      default:
-+              /* PPE module requires untagged DSA packets to work properly,
-+               * so move DSA tag to DMA descriptor.
-+               */
-+              memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
-+              __skb_pull(skb, MTK_HDR_LEN);
-+              break;
-+      }
-+
-+      return tag;
-+#else
-+      return 0;
-+#endif
-+}
-+
- static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-                                  struct net_device *dev)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
--      u32 msg0, msg1, len = skb_headlen(skb);
-       struct airoha_qdma *qdma = port->qdma;
-+      u32 nr_frags, tag, msg0, msg1, len;
-       struct netdev_queue *txq;
-       struct airoha_queue *q;
--      void *data = skb->data;
-+      void *data;
-       int i, qid;
-       u16 index;
-       u8 fport;
-       qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
-+      tag = airoha_get_dsa_tag(skb, dev);
-+
-       msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
-                         qid / AIROHA_NUM_QOS_QUEUES) |
-              FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
--                        qid % AIROHA_NUM_QOS_QUEUES);
-+                        qid % AIROHA_NUM_QOS_QUEUES) |
-+             FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
-                       FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
-@@ -1686,6 +1756,8 @@ static netdev_tx_t airoha_dev_xmit(struc
-       spin_lock_bh(&q->lock);
-       txq = netdev_get_tx_queue(dev, qid);
-+      nr_frags = 1 + skb_shinfo(skb)->nr_frags;
-+
-       if (q->queued + nr_frags > q->ndesc) {
-               /* not enough space in the queue */
-               netif_tx_stop_queue(txq);
-@@ -1693,7 +1765,10 @@ static netdev_tx_t airoha_dev_xmit(struc
-               return NETDEV_TX_BUSY;
-       }
-+      len = skb_headlen(skb);
-+      data = skb->data;
-       index = q->head;
-+
-       for (i = 0; i < nr_frags; i++) {
-               struct airoha_qdma_desc *desc = &q->desc[index];
-               struct airoha_queue_entry *e = &q->entry[index];
-@@ -2224,6 +2299,37 @@ static const struct ethtool_ops airoha_e
-       .get_rmon_stats         = airoha_ethtool_get_rmon_stats,
- };
-+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
-+              struct metadata_dst *md_dst;
-+
-+              md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
-+                                          GFP_KERNEL);
-+              if (!md_dst)
-+                      return -ENOMEM;
-+
-+              md_dst->u.port_info.port_id = i;
-+              port->dsa_meta[i] = md_dst;
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
-+              if (!port->dsa_meta[i])
-+                      continue;
-+
-+              metadata_dst_free(port->dsa_meta[i]);
-+      }
-+}
-+
- static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
- {
-       const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-@@ -2296,6 +2402,10 @@ static int airoha_alloc_gdm_port(struct
-       port->id = id;
-       eth->ports[index] = port;
-+      err = airoha_metadata_dst_alloc(port);
-+      if (err)
-+              return err;
-+
-       return register_netdev(dev);
- }
-@@ -2388,8 +2498,10 @@ error_hw_cleanup:
-       for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-               struct airoha_gdm_port *port = eth->ports[i];
--              if (port && port->dev->reg_state == NETREG_REGISTERED)
-+              if (port && port->dev->reg_state == NETREG_REGISTERED) {
-                       unregister_netdev(port->dev);
-+                      airoha_metadata_dst_free(port);
-+              }
-       }
-       free_netdev(eth->napi_dev);
-       platform_set_drvdata(pdev, NULL);
-@@ -2415,6 +2527,7 @@ static void airoha_remove(struct platfor
-               airoha_dev_stop(port->dev);
-               unregister_netdev(port->dev);
-+              airoha_metadata_dst_free(port);
-       }
-       free_netdev(eth->napi_dev);
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -15,6 +15,7 @@
- #define AIROHA_MAX_NUM_GDM_PORTS      1
- #define AIROHA_MAX_NUM_QDMA           2
-+#define AIROHA_MAX_DSA_PORTS          7
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
- #define AIROHA_MAX_MTU                        2000
-@@ -43,6 +44,10 @@
- #define QDMA_METER_IDX(_n)            ((_n) & 0xff)
- #define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+#define MTK_HDR_LEN                   4
-+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
-+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
-+
- enum {
-       QDMA_INT_REG_IDX0,
-       QDMA_INT_REG_IDX1,
-@@ -231,6 +236,8 @@ struct airoha_gdm_port {
-       /* qos stats counters */
-       u64 cpu_tx_packets;
-       u64 fwd_tx_packets;
-+
-+      struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
- };
- struct airoha_eth {
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -624,6 +624,8 @@
- #define QDMA_ETH_TXMSG_ACNT_G1_MASK   GENMASK(10, 6)  /* 0x1f do not count */
- #define QDMA_ETH_TXMSG_ACNT_G0_MASK   GENMASK(5, 0)   /* 0x3f do not count */
-+/* RX MSG0 */
-+#define QDMA_ETH_RXMSG_SPTAG          GENMASK(21, 14)
- /* RX MSG1 */
- #define QDMA_ETH_RXMSG_DEI_MASK               BIT(31)
- #define QDMA_ETH_RXMSG_IP6_MASK               BIT(30)
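For clarity, here is a small sketch of how the DSA special tag travels through the QDMA descriptor under this scheme: on TX it is packed into msg0 via QDMA_ETH_TXMSG_SP_TAG_MASK, and on RX the PPE reports the source port back in msg0, recovered via QDMA_ETH_RXMSG_SPTAG. Only the field macros come from the patch; the helper names below are illustrative.

#include <linux/bitfield.h>

/* Illustrative helpers, not part of the patch: pack the DSA tag on TX and
 * recover the source-port tag on RX using the msg0 fields defined above.
 */
static __le32 example_tx_msg0_with_sptag(u32 msg0, u16 dsa_tag)
{
	/* dsa_tag is the 16-bit MTK tag moved out of the frame on TX. */
	msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, dsa_tag);
	return cpu_to_le32(msg0);
}

static u32 example_rx_sptag(const struct airoha_qdma_desc *desc)
{
	/* Index used to look up port->dsa_meta[] before setting skb_dst. */
	return FIELD_GET(QDMA_ETH_RXMSG_SPTAG, le32_to_cpu(desc->msg0));
}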
diff --git a/target/linux/airoha/patches-6.6/048-06-v6.15-net-dsa-mt7530-Enable-Rx-sptag-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/048-06-v6.15-net-dsa-mt7530-Enable-Rx-sptag-for-EN7581-SoC.patch
deleted file mode 100644 (file)
index 27956d5..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-From ab667db1e6014634c6607ebdddc16c1b8394a935 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:14 +0100
-Subject: [PATCH 06/15] net: dsa: mt7530: Enable Rx sptag for EN7581 SoC
-
-The Packet Processor Engine (PPE) module used for hw acceleration on the
-EN7581 mac block requires untagged DSA packets on the TX side and reads
-the DSA tag from the DMA descriptor on the RX side in order to parse
-packets properly. For this reason, enable RX Special Tag (SPTAG) for the
-EN7581 SoC.
-This is a preliminary patch to enable netfilter flowtable hw offloading
-on EN7581 SoC.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/dsa/mt7530.c | 5 +++++
- drivers/net/dsa/mt7530.h | 4 ++++
- 2 files changed, 9 insertions(+)
-
---- a/drivers/net/dsa/mt7530.c
-+++ b/drivers/net/dsa/mt7530.c
-@@ -2588,6 +2588,11 @@ mt7531_setup_common(struct dsa_switch *d
-       /* Allow mirroring frames received on the local port (monitor port). */
-       mt7530_set(priv, MT753X_AGC, LOCAL_EN);
-+      /* Enable Special Tag for rx frames */
-+      if (priv->id == ID_EN7581)
-+              mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
-+                           CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
-+
-       /* Flush the FDB table */
-       ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
-       if (ret < 0)
---- a/drivers/net/dsa/mt7530.h
-+++ b/drivers/net/dsa/mt7530.h
-@@ -615,6 +615,10 @@ enum mt7531_xtal_fsel {
- #define  MT7531_GPIO12_RG_RXD3_MASK   GENMASK(19, 16)
- #define  MT7531_EXT_P_MDIO_12         (2 << 16)
-+#define MT753X_CPORT_SPTAG_CFG                0x7c10
-+#define  CPORT_SW2FE_STAG_EN          BIT(1)
-+#define  CPORT_FE2SW_STAG_EN          BIT(0)
-+
- /* Registers for LED GPIO control (MT7530 only)
-  * All registers follow this pattern:
-  * [ 2: 0]  port 0
diff --git a/target/linux/airoha/patches-6.6/048-07-v6.15-net-airoha-Enable-support-for-multiple-net_devices.patch b/target/linux/airoha/patches-6.6/048-07-v6.15-net-airoha-Enable-support-for-multiple-net_devices.patch
deleted file mode 100644 (file)
index af4fa78..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-From 80369686737fe07c233a1152da0b84372dabdcd6 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:15 +0100
-Subject: [PATCH 07/15] net: airoha: Enable support for multiple net_devices
-
-In the current codebase the airoha_eth driver supports just a single
-net_device connected to the Packet Switch Engine (PSE) lan port (GDM1).
-As shown in commit 23020f049327 ("net: airoha: Introduce ethernet
-support for EN7581 SoC"), PSE can switch packets between four GDM ports.
-Enable the capability to create a net_device for each GDM port of the
-PSE module. Moreover, since the QDMA blocks can be shared between
-net_devices, do not stop TX/RX DMA in airoha_dev_stop() if there are
-active net_devices for this QDMA block.
-This is a preliminary patch to enable flowtable hw offloading for EN7581
-SoC.
-
-Co-developed-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 35 ++++++++++++++----------
- drivers/net/ethernet/airoha/airoha_eth.h |  4 ++-
- 2 files changed, 24 insertions(+), 15 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1563,6 +1563,7 @@ static int airoha_dev_open(struct net_de
-       airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
-                       GLOBAL_CFG_TX_DMA_EN_MASK |
-                       GLOBAL_CFG_RX_DMA_EN_MASK);
-+      atomic_inc(&qdma->users);
-       return 0;
- }
-@@ -1578,16 +1579,20 @@ static int airoha_dev_stop(struct net_de
-       if (err)
-               return err;
--      airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
--                        GLOBAL_CFG_TX_DMA_EN_MASK |
--                        GLOBAL_CFG_RX_DMA_EN_MASK);
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
-+              netdev_tx_reset_subqueue(dev, i);
--      for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
--              if (!qdma->q_tx[i].ndesc)
--                      continue;
-+      if (atomic_dec_and_test(&qdma->users)) {
-+              airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
-+                                GLOBAL_CFG_TX_DMA_EN_MASK |
-+                                GLOBAL_CFG_RX_DMA_EN_MASK);
-+
-+              for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-+                      if (!qdma->q_tx[i].ndesc)
-+                              continue;
--              airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
--              netdev_tx_reset_subqueue(dev, i);
-+                      airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-+              }
-       }
-       return 0;
-@@ -2330,13 +2335,14 @@ static void airoha_metadata_dst_free(str
-       }
- }
--static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
-+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
-+                               struct device_node *np, int index)
- {
-       const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-       struct airoha_gdm_port *port;
-       struct airoha_qdma *qdma;
-       struct net_device *dev;
--      int err, index;
-+      int err, p;
-       u32 id;
-       if (!id_ptr) {
-@@ -2345,14 +2351,14 @@ static int airoha_alloc_gdm_port(struct
-       }
-       id = be32_to_cpup(id_ptr);
--      index = id - 1;
-+      p = id - 1;
-       if (!id || id > ARRAY_SIZE(eth->ports)) {
-               dev_err(eth->dev, "invalid gdm port id: %d\n", id);
-               return -EINVAL;
-       }
--      if (eth->ports[index]) {
-+      if (eth->ports[p]) {
-               dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
-               return -EINVAL;
-       }
-@@ -2400,7 +2406,7 @@ static int airoha_alloc_gdm_port(struct
-       port->qdma = qdma;
-       port->dev = dev;
-       port->id = id;
--      eth->ports[index] = port;
-+      eth->ports[p] = port;
-       err = airoha_metadata_dst_alloc(port);
-       if (err)
-@@ -2472,6 +2478,7 @@ static int airoha_probe(struct platform_
-       for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-               airoha_qdma_start_napi(&eth->qdma[i]);
-+      i = 0;
-       for_each_child_of_node(pdev->dev.of_node, np) {
-               if (!of_device_is_compatible(np, "airoha,eth-mac"))
-                       continue;
-@@ -2479,7 +2486,7 @@ static int airoha_probe(struct platform_
-               if (!of_device_is_available(np))
-                       continue;
--              err = airoha_alloc_gdm_port(eth, np);
-+              err = airoha_alloc_gdm_port(eth, np, i++);
-               if (err) {
-                       of_node_put(np);
-                       goto error_napi_stop;
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -13,7 +13,7 @@
- #include <linux/netdevice.h>
- #include <linux/reset.h>
--#define AIROHA_MAX_NUM_GDM_PORTS      1
-+#define AIROHA_MAX_NUM_GDM_PORTS      4
- #define AIROHA_MAX_NUM_QDMA           2
- #define AIROHA_MAX_DSA_PORTS          7
- #define AIROHA_MAX_NUM_RSTS           3
-@@ -212,6 +212,8 @@ struct airoha_qdma {
-       u32 irqmask[QDMA_INT_REG_MAX];
-       int irq;
-+      atomic_t users;
-+
-       struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-       struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
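A condensed sketch of the reference-counting scheme the hunks above introduce, assuming the existing airoha_qdma_set()/airoha_qdma_clear() helpers: every net_device that opens takes a reference on the shared QDMA block, and only the last one to stop disables the DMA engines.

/* Illustrative sketch of the shared-QDMA lifetime rule (not a verbatim
 * copy of the driver): qdma->users counts open net_devices per QDMA block.
 */
static void example_qdma_get(struct airoha_qdma *qdma)
{
	/* airoha_dev_open(): enable DMA, then record one more active user. */
	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK | GLOBAL_CFG_RX_DMA_EN_MASK);
	atomic_inc(&qdma->users);
}

static void example_qdma_put(struct airoha_qdma *qdma)
{
	/* airoha_dev_stop(): only the last user tears the DMA engines down. */
	if (atomic_dec_and_test(&qdma->users))
		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
				  GLOBAL_CFG_TX_DMA_EN_MASK |
				  GLOBAL_CFG_RX_DMA_EN_MASK);
}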
diff --git a/target/linux/airoha/patches-6.6/048-08-v6.15-net-airoha-Move-REG_GDM_FWD_CFG-initialization-in-ai.patch b/target/linux/airoha/patches-6.6/048-08-v6.15-net-airoha-Move-REG_GDM_FWD_CFG-initialization-in-ai.patch
deleted file mode 100644 (file)
index 8bc0f85..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-From 67fde5d58cd43d129a979e918ec9cd5d2e2fbcfb Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:16 +0100
-Subject: [PATCH 08/15] net: airoha: Move REG_GDM_FWD_CFG() initialization in
- airoha_dev_init()
-
-Move the REG_GDM_FWD_CFG() register initialization into the
-airoha_dev_init() routine. Moreover, always send traffic to the PPE
-module so it can be processed by the hw accelerator.
-This is a preliminary patch to enable netfilter flowtable hw offloading
-on EN7581 SoC.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 14 ++++----------
- 1 file changed, 4 insertions(+), 10 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -107,25 +107,20 @@ static void airoha_set_gdm_port_fwd_cfg(
- static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
- {
--      u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
--      u32 vip_port, cfg_addr;
-+      u32 vip_port;
-       switch (port) {
-       case XSI_PCIE0_PORT:
-               vip_port = XSI_PCIE0_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(3);
-               break;
-       case XSI_PCIE1_PORT:
-               vip_port = XSI_PCIE1_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(3);
-               break;
-       case XSI_USB_PORT:
-               vip_port = XSI_USB_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(4);
-               break;
-       case XSI_ETH_PORT:
-               vip_port = XSI_ETH_VIP_PORT_MASK;
--              cfg_addr = REG_GDM_FWD_CFG(4);
-               break;
-       default:
-               return -EINVAL;
-@@ -139,8 +134,6 @@ static int airoha_set_gdm_port(struct ai
-               airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
-       }
--      airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
--
-       return 0;
- }
-@@ -177,8 +170,6 @@ static void airoha_fe_maccr_init(struct
-               airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
-                             GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
-                             GDM_DROP_CRC_ERR);
--              airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
--                                          FE_PSE_PORT_CDM1);
-               airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
-                             GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-                             FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-@@ -1615,8 +1606,11 @@ static int airoha_dev_set_macaddr(struct
- static int airoha_dev_init(struct net_device *dev)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->qdma->eth;
-       airoha_set_macaddr(port, dev->dev_addr);
-+      airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id),
-+                                  FE_PSE_PORT_PPE1);
-       return 0;
- }
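The body of airoha_set_gdm_port_fwd_cfg() is not shown in this hunk; below is a minimal sketch of the read-modify-write it is expected to perform, assuming the driver's airoha_fe_rmw() helper and the GDM_*CFQ_MASK fields from airoha_regs.h. It steers the unicast/broadcast/multicast/other forwarding queues of a GDM port to the selected PSE port (here FE_PSE_PORT_PPE1).

#include <linux/bitfield.h>

/* Illustrative sketch, not the verbatim driver function: point all four
 * forwarding-queue selectors of REG_GDM_FWD_CFG(port) at one PSE port.
 */
static void example_gdm_fwd_to_pse(struct airoha_eth *eth, int port_id,
				   u32 pse_port)
{
	airoha_fe_rmw(eth, REG_GDM_FWD_CFG(port_id),
		      GDM_OCFQ_MASK | GDM_MCFQ_MASK |
		      GDM_BCFQ_MASK | GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, pse_port) |
		      FIELD_PREP(GDM_MCFQ_MASK, pse_port) |
		      FIELD_PREP(GDM_BCFQ_MASK, pse_port) |
		      FIELD_PREP(GDM_UCFQ_MASK, pse_port));
}

/* e.g. from airoha_dev_init():
 *	example_gdm_fwd_to_pse(eth, port->id, FE_PSE_PORT_PPE1);
 */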
diff --git a/target/linux/airoha/patches-6.6/048-09-v6.15-net-airoha-Rename-airoha_set_gdm_port_fwd_cfg-in-air.patch b/target/linux/airoha/patches-6.6/048-09-v6.15-net-airoha-Rename-airoha_set_gdm_port_fwd_cfg-in-air.patch
deleted file mode 100644 (file)
index 11f879d..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-From c28b8375f6d02ef3b5e8c51234cc3f6d47d9fb7f Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:17 +0100
-Subject: [PATCH 09/15] net: airoha: Rename airoha_set_gdm_port_fwd_cfg() in
- airoha_set_vip_for_gdm_port()
-
-Rename airoha_set_gdm_port() to airoha_set_vip_for_gdm_port().
-Get rid of the airoha_set_gdm_ports() routine.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 49 ++++++------------------
- drivers/net/ethernet/airoha/airoha_eth.h |  8 ----
- 2 files changed, 11 insertions(+), 46 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -105,25 +105,23 @@ static void airoha_set_gdm_port_fwd_cfg(
-                     FIELD_PREP(GDM_UCFQ_MASK, val));
- }
--static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
-+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
-+                                     bool enable)
- {
-+      struct airoha_eth *eth = port->qdma->eth;
-       u32 vip_port;
--      switch (port) {
--      case XSI_PCIE0_PORT:
-+      switch (port->id) {
-+      case 3:
-+              /* FIXME: handle XSI_PCIE1_PORT */
-               vip_port = XSI_PCIE0_VIP_PORT_MASK;
-               break;
--      case XSI_PCIE1_PORT:
--              vip_port = XSI_PCIE1_VIP_PORT_MASK;
--              break;
--      case XSI_USB_PORT:
--              vip_port = XSI_USB_VIP_PORT_MASK;
--              break;
--      case XSI_ETH_PORT:
-+      case 4:
-+              /* FIXME: handle XSI_USB_PORT */
-               vip_port = XSI_ETH_VIP_PORT_MASK;
-               break;
-       default:
--              return -EINVAL;
-+              return 0;
-       }
-       if (enable) {
-@@ -137,31 +135,6 @@ static int airoha_set_gdm_port(struct ai
-       return 0;
- }
--static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
--{
--      const int port_list[] = {
--              XSI_PCIE0_PORT,
--              XSI_PCIE1_PORT,
--              XSI_USB_PORT,
--              XSI_ETH_PORT
--      };
--      int i, err;
--
--      for (i = 0; i < ARRAY_SIZE(port_list); i++) {
--              err = airoha_set_gdm_port(eth, port_list[i], enable);
--              if (err)
--                      goto error;
--      }
--
--      return 0;
--
--error:
--      for (i--; i >= 0; i--)
--              airoha_set_gdm_port(eth, port_list[i], false);
--
--      return err;
--}
--
- static void airoha_fe_maccr_init(struct airoha_eth *eth)
- {
-       int p;
-@@ -1540,7 +1513,7 @@ static int airoha_dev_open(struct net_de
-       int err;
-       netif_tx_start_all_queues(dev);
--      err = airoha_set_gdm_ports(qdma->eth, true);
-+      err = airoha_set_vip_for_gdm_port(port, true);
-       if (err)
-               return err;
-@@ -1566,7 +1539,7 @@ static int airoha_dev_stop(struct net_de
-       int i, err;
-       netif_tx_disable(dev);
--      err = airoha_set_gdm_ports(qdma->eth, false);
-+      err = airoha_set_vip_for_gdm_port(port, false);
-       if (err)
-               return err;
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -58,14 +58,6 @@ enum {
- };
- enum {
--      XSI_PCIE0_PORT,
--      XSI_PCIE1_PORT,
--      XSI_USB_PORT,
--      XSI_AE_PORT,
--      XSI_ETH_PORT,
--};
--
--enum {
-       XSI_PCIE0_VIP_PORT_MASK = BIT(22),
-       XSI_PCIE1_VIP_PORT_MASK = BIT(23),
-       XSI_USB_VIP_PORT_MASK   = BIT(25),
diff --git a/target/linux/airoha/patches-6.6/048-12-v6.15-net-airoha-Introduce-Airoha-NPU-support.patch b/target/linux/airoha/patches-6.6/048-12-v6.15-net-airoha-Introduce-Airoha-NPU-support.patch
deleted file mode 100644 (file)
index 41c5622..0000000
+++ /dev/null
@@ -1,627 +0,0 @@
-From 23290c7bc190def4e1ca61610992d9b7c32e33f3 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:20 +0100
-Subject: [PATCH 12/15] net: airoha: Introduce Airoha NPU support
-
-The Packet Processor Engine (PPE) module available on the EN7581 SoC
-populates the PPE table with 5-tuple flower rules learned from traffic
-forwarded between the GDM ports connected to the Packet Switch Engine
-(PSE) module. The airoha_eth driver can enable hw acceleration of the
-learned 5-tuple rules if the user configures them in a netfilter
-flowtable (netfilter flowtable support will be added in subsequent
-patches). The airoha_eth driver configures and collects data from the
-PPE module via a Network Processor Unit (NPU) RISC-V module available
-on the EN7581 SoC.
-Introduce basic support for the Airoha NPU module.
-
-Tested-by: Sayantan Nandy <sayantan.nandy@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/Kconfig      |   9 +
- drivers/net/ethernet/airoha/Makefile     |   1 +
- drivers/net/ethernet/airoha/airoha_eth.h |   2 +
- drivers/net/ethernet/airoha/airoha_npu.c | 520 +++++++++++++++++++++++
- drivers/net/ethernet/airoha/airoha_npu.h |  34 ++
- 5 files changed, 566 insertions(+)
- create mode 100644 drivers/net/ethernet/airoha/airoha_npu.c
- create mode 100644 drivers/net/ethernet/airoha/airoha_npu.h
-
---- a/drivers/net/ethernet/airoha/Kconfig
-+++ b/drivers/net/ethernet/airoha/Kconfig
-@@ -7,9 +7,18 @@ config NET_VENDOR_AIROHA
- if NET_VENDOR_AIROHA
-+config NET_AIROHA_NPU
-+      tristate "Airoha NPU support"
-+      select WANT_DEV_COREDUMP
-+      select REGMAP_MMIO
-+      help
-+        This driver supports Airoha Network Processor (NPU) available
-+        on the Airoha Soc family.
-+
- config NET_AIROHA
-       tristate "Airoha SoC Gigabit Ethernet support"
-       depends on NET_DSA || !NET_DSA
-+      select NET_AIROHA_NPU
-       select PAGE_POOL
-       help
-         This driver supports the gigabit ethernet MACs in the
---- a/drivers/net/ethernet/airoha/Makefile
-+++ b/drivers/net/ethernet/airoha/Makefile
-@@ -4,3 +4,4 @@
- #
- obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
-+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -240,6 +240,8 @@ struct airoha_eth {
-       unsigned long state;
-       void __iomem *fe_regs;
-+      struct airoha_npu __rcu *npu;
-+
-       struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-       struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -0,0 +1,520 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2025 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#include <linux/devcoredump.h>
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
-+#include <linux/of_net.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_reserved_mem.h>
-+#include <linux/regmap.h>
-+
-+#include "airoha_npu.h"
-+
-+#define NPU_EN7581_FIRMWARE_DATA              "airoha/en7581_npu_data.bin"
-+#define NPU_EN7581_FIRMWARE_RV32              "airoha/en7581_npu_rv32.bin"
-+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE     0x200000
-+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE     0x10000
-+#define NPU_DUMP_SIZE                         512
-+
-+#define REG_NPU_LOCAL_SRAM            0x0
-+
-+#define NPU_PC_BASE_ADDR              0x305000
-+#define REG_PC_DBG(_n)                        (0x305000 + ((_n) * 0x100))
-+
-+#define NPU_CLUSTER_BASE_ADDR         0x306000
-+
-+#define REG_CR_BOOT_TRIGGER           (NPU_CLUSTER_BASE_ADDR + 0x000)
-+#define REG_CR_BOOT_CONFIG            (NPU_CLUSTER_BASE_ADDR + 0x004)
-+#define REG_CR_BOOT_BASE(_n)          (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
-+
-+#define NPU_MBOX_BASE_ADDR            0x30c000
-+
-+#define REG_CR_MBOX_INT_STATUS                (NPU_MBOX_BASE_ADDR + 0x000)
-+#define MBOX_INT_STATUS_MASK          BIT(8)
-+
-+#define REG_CR_MBOX_INT_MASK(_n)      (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
-+#define REG_CR_MBQ0_CTRL(_n)          (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
-+#define REG_CR_MBQ8_CTRL(_n)          (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
-+#define REG_CR_NPU_MIB(_n)            (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
-+
-+#define NPU_TIMER_BASE_ADDR           0x310100
-+#define REG_WDT_TIMER_CTRL(_n)                (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
-+#define WDT_EN_MASK                   BIT(25)
-+#define WDT_INTR_MASK                 BIT(21)
-+
-+enum {
-+      NPU_OP_SET = 1,
-+      NPU_OP_SET_NO_WAIT,
-+      NPU_OP_GET,
-+      NPU_OP_GET_NO_WAIT,
-+};
-+
-+enum {
-+      NPU_FUNC_WIFI,
-+      NPU_FUNC_TUNNEL,
-+      NPU_FUNC_NOTIFY,
-+      NPU_FUNC_DBA,
-+      NPU_FUNC_TR471,
-+      NPU_FUNC_PPE,
-+};
-+
-+enum {
-+      NPU_MBOX_ERROR,
-+      NPU_MBOX_SUCCESS,
-+};
-+
-+enum {
-+      PPE_FUNC_SET_WAIT,
-+      PPE_FUNC_SET_WAIT_HWNAT_INIT,
-+      PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
-+      PPE_FUNC_SET_WAIT_API,
-+};
-+
-+enum {
-+      PPE2_SRAM_SET_ENTRY,
-+      PPE_SRAM_SET_ENTRY,
-+      PPE_SRAM_SET_VAL,
-+      PPE_SRAM_RESET_VAL,
-+};
-+
-+enum {
-+      QDMA_WAN_ETHER = 1,
-+      QDMA_WAN_PON_XDSL,
-+};
-+
-+#define MBOX_MSG_FUNC_ID      GENMASK(14, 11)
-+#define MBOX_MSG_STATIC_BUF   BIT(5)
-+#define MBOX_MSG_STATUS               GENMASK(4, 2)
-+#define MBOX_MSG_DONE         BIT(1)
-+#define MBOX_MSG_WAIT_RSP     BIT(0)
-+
-+#define PPE_TYPE_L2B_IPV4     2
-+#define PPE_TYPE_L2B_IPV4_IPV6        3
-+
-+struct ppe_mbox_data {
-+      u32 func_type;
-+      u32 func_id;
-+      union {
-+              struct {
-+                      u8 cds;
-+                      u8 xpon_hal_api;
-+                      u8 wan_xsi;
-+                      u8 ct_joyme4;
-+                      int ppe_type;
-+                      int wan_mode;
-+                      int wan_sel;
-+              } init_info;
-+              struct {
-+                      int func_id;
-+                      u32 size;
-+                      u32 data;
-+              } set_info;
-+      };
-+};
-+
-+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
-+                             void *p, int size)
-+{
-+      u16 core = 0; /* FIXME */
-+      u32 val, offset = core << 4;
-+      dma_addr_t dma_addr;
-+      void *addr;
-+      int ret;
-+
-+      addr = kmemdup(p, size, GFP_ATOMIC);
-+      if (!addr)
-+              return -ENOMEM;
-+
-+      dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
-+      ret = dma_mapping_error(npu->dev, dma_addr);
-+      if (ret)
-+              goto out;
-+
-+      spin_lock_bh(&npu->cores[core].lock);
-+
-+      regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
-+      regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
-+      regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
-+      regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
-+      val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
-+      regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
-+
-+      ret = regmap_read_poll_timeout_atomic(npu->regmap,
-+                                            REG_CR_MBQ0_CTRL(3) + offset,
-+                                            val, (val & MBOX_MSG_DONE),
-+                                            100, 100 * MSEC_PER_SEC);
-+      if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
-+              ret = -EINVAL;
-+
-+      spin_unlock_bh(&npu->cores[core].lock);
-+
-+      dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
-+out:
-+      kfree(addr);
-+
-+      return ret;
-+}
-+
-+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
-+                                 struct reserved_mem *rmem)
-+{
-+      const struct firmware *fw;
-+      void __iomem *addr;
-+      int ret;
-+
-+      ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
-+      if (ret)
-+              return ret == -ENOENT ? -EPROBE_DEFER : ret;
-+
-+      if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
-+              dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
-+                      NPU_EN7581_FIRMWARE_RV32, fw->size);
-+              ret = -E2BIG;
-+              goto out;
-+      }
-+
-+      addr = devm_ioremap(dev, rmem->base, rmem->size);
-+      if (!addr) {
-+              ret = -ENOMEM;
-+              goto out;
-+      }
-+
-+      memcpy_toio(addr, fw->data, fw->size);
-+      release_firmware(fw);
-+
-+      ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
-+      if (ret)
-+              return ret == -ENOENT ? -EPROBE_DEFER : ret;
-+
-+      if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
-+              dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
-+                      NPU_EN7581_FIRMWARE_DATA, fw->size);
-+              ret = -E2BIG;
-+              goto out;
-+      }
-+
-+      memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
-+out:
-+      release_firmware(fw);
-+
-+      return ret;
-+}
-+
-+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
-+{
-+      struct airoha_npu *npu = npu_instance;
-+
-+      /* clear mbox interrupt status */
-+      regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
-+                   MBOX_INT_STATUS_MASK);
-+
-+      /* acknowledge npu */
-+      regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
-+                         MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static void airoha_npu_wdt_work(struct work_struct *work)
-+{
-+      struct airoha_npu_core *core;
-+      struct airoha_npu *npu;
-+      void *dump;
-+      u32 val[3];
-+      int c;
-+
-+      core = container_of(work, struct airoha_npu_core, wdt_work);
-+      npu = core->npu;
-+
-+      dump = vzalloc(NPU_DUMP_SIZE);
-+      if (!dump)
-+              return;
-+
-+      c = core - &npu->cores[0];
-+      regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
-+      snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
-+               val[0], val[1], val[2]);
-+
-+      dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
-+}
-+
-+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
-+{
-+      struct airoha_npu_core *core = core_instance;
-+      struct airoha_npu *npu = core->npu;
-+      int c = core - &npu->cores[0];
-+      u32 val;
-+
-+      regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
-+      if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
-+          FIELD_GET(WDT_EN_MASK, val))
-+              schedule_work(&core->wdt_work);
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static int airoha_npu_ppe_init(struct airoha_npu *npu)
-+{
-+      struct ppe_mbox_data ppe_data = {
-+              .func_type = NPU_OP_SET,
-+              .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
-+              .init_info = {
-+                      .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
-+                      .wan_mode = QDMA_WAN_ETHER,
-+              },
-+      };
-+
-+      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-+                                 sizeof(struct ppe_mbox_data));
-+}
-+
-+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
-+{
-+      struct ppe_mbox_data ppe_data = {
-+              .func_type = NPU_OP_SET,
-+              .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
-+      };
-+
-+      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-+                                 sizeof(struct ppe_mbox_data));
-+}
-+
-+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
-+                                           dma_addr_t foe_addr,
-+                                           int sram_num_entries)
-+{
-+      struct ppe_mbox_data ppe_data = {
-+              .func_type = NPU_OP_SET,
-+              .func_id = PPE_FUNC_SET_WAIT_API,
-+              .set_info = {
-+                      .func_id = PPE_SRAM_RESET_VAL,
-+                      .data = foe_addr,
-+                      .size = sram_num_entries,
-+              },
-+      };
-+
-+      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-+                                 sizeof(struct ppe_mbox_data));
-+}
-+
-+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
-+                                     dma_addr_t foe_addr,
-+                                     u32 entry_size, u32 hash, bool ppe2)
-+{
-+      struct ppe_mbox_data ppe_data = {
-+              .func_type = NPU_OP_SET,
-+              .func_id = PPE_FUNC_SET_WAIT_API,
-+              .set_info = {
-+                      .data = foe_addr,
-+                      .size = entry_size,
-+              },
-+      };
-+      int err;
-+
-+      ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
-+                                       : PPE_SRAM_SET_ENTRY;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-+                                sizeof(struct ppe_mbox_data));
-+      if (err)
-+              return err;
-+
-+      ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
-+      ppe_data.set_info.data = hash;
-+      ppe_data.set_info.size = sizeof(u32);
-+
-+      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-+                                 sizeof(struct ppe_mbox_data));
-+}
-+
-+struct airoha_npu *airoha_npu_get(struct device *dev)
-+{
-+      struct platform_device *pdev;
-+      struct device_node *np;
-+      struct airoha_npu *npu;
-+
-+      np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
-+      if (!np)
-+              return ERR_PTR(-ENODEV);
-+
-+      pdev = of_find_device_by_node(np);
-+      of_node_put(np);
-+
-+      if (!pdev) {
-+              dev_err(dev, "cannot find device node %s\n", np->name);
-+              return ERR_PTR(-ENODEV);
-+      }
-+
-+      if (!try_module_get(THIS_MODULE)) {
-+              dev_err(dev, "failed to get the device driver module\n");
-+              npu = ERR_PTR(-ENODEV);
-+              goto error_pdev_put;
-+      }
-+
-+      npu = platform_get_drvdata(pdev);
-+      if (!npu) {
-+              npu = ERR_PTR(-ENODEV);
-+              goto error_module_put;
-+      }
-+
-+      if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
-+              dev_err(&pdev->dev,
-+                      "failed to create device link to consumer %s\n",
-+                      dev_name(dev));
-+              npu = ERR_PTR(-EINVAL);
-+              goto error_module_put;
-+      }
-+
-+      return npu;
-+
-+error_module_put:
-+      module_put(THIS_MODULE);
-+error_pdev_put:
-+      platform_device_put(pdev);
-+
-+      return npu;
-+}
-+EXPORT_SYMBOL_GPL(airoha_npu_get);
-+
-+void airoha_npu_put(struct airoha_npu *npu)
-+{
-+      module_put(THIS_MODULE);
-+      put_device(npu->dev);
-+}
-+EXPORT_SYMBOL_GPL(airoha_npu_put);
-+
-+static const struct of_device_id of_airoha_npu_match[] = {
-+      { .compatible = "airoha,en7581-npu" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
-+
-+static const struct regmap_config regmap_config = {
-+      .name                   = "npu",
-+      .reg_bits               = 32,
-+      .val_bits               = 32,
-+      .reg_stride             = 4,
-+      .disable_locking        = true,
-+};
-+
-+static int airoha_npu_probe(struct platform_device *pdev)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct reserved_mem *rmem;
-+      struct airoha_npu *npu;
-+      struct device_node *np;
-+      void __iomem *base;
-+      int i, irq, err;
-+
-+      base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(base))
-+              return PTR_ERR(base);
-+
-+      npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
-+      if (!npu)
-+              return -ENOMEM;
-+
-+      npu->dev = dev;
-+      npu->ops.ppe_init = airoha_npu_ppe_init;
-+      npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
-+      npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
-+      npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
-+
-+      npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
-+      if (IS_ERR(npu->regmap))
-+              return PTR_ERR(npu->regmap);
-+
-+      np = of_parse_phandle(dev->of_node, "memory-region", 0);
-+      if (!np)
-+              return -ENODEV;
-+
-+      rmem = of_reserved_mem_lookup(np);
-+      of_node_put(np);
-+
-+      if (!rmem)
-+              return -ENODEV;
-+
-+      irq = platform_get_irq(pdev, 0);
-+      if (irq < 0)
-+              return irq;
-+
-+      err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
-+                             IRQF_SHARED, "airoha-npu-mbox", npu);
-+      if (err)
-+              return err;
-+
-+      for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
-+              struct airoha_npu_core *core = &npu->cores[i];
-+
-+              spin_lock_init(&core->lock);
-+              core->npu = npu;
-+
-+              irq = platform_get_irq(pdev, i + 1);
-+              if (irq < 0)
-+                      return irq;
-+
-+              err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
-+                                     IRQF_SHARED, "airoha-npu-wdt", core);
-+              if (err)
-+                      return err;
-+
-+              INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
-+      }
-+
-+      err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
-+      if (err)
-+              return err;
-+
-+      err = airoha_npu_run_firmware(dev, base, rmem);
-+      if (err)
-+              return dev_err_probe(dev, err, "failed to run npu firmware\n");
-+
-+      regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
-+                   rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
-+      regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
-+      regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
-+      regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
-+      msleep(100);
-+
-+      /* set the boot address for each NPU core */
-+      for (i = 0; i < NPU_NUM_CORES; i++)
-+              regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
-+      usleep_range(1000, 2000);
-+
-+      /* enable NPU cores */
-+      /* do not start core3 since it is used for WiFi offloading */
-+      regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
-+      regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
-+      msleep(100);
-+
-+      platform_set_drvdata(pdev, npu);
-+
-+      return 0;
-+}
-+
-+static void airoha_npu_remove(struct platform_device *pdev)
-+{
-+      struct airoha_npu *npu = platform_get_drvdata(pdev);
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
-+              cancel_work_sync(&npu->cores[i].wdt_work);
-+}
-+
-+static struct platform_driver airoha_npu_driver = {
-+      .probe = airoha_npu_probe,
-+      .remove_new = airoha_npu_remove,
-+      .driver = {
-+              .name = "airoha-npu",
-+              .of_match_table = of_airoha_npu_match,
-+      },
-+};
-+module_platform_driver(airoha_npu_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -0,0 +1,34 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * Copyright (c) 2025 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#define NPU_NUM_CORES         8
-+
-+struct airoha_npu {
-+      struct device *dev;
-+      struct regmap *regmap;
-+
-+      struct airoha_npu_core {
-+              struct airoha_npu *npu;
-+              /* protect concurrent npu memory accesses */
-+              spinlock_t lock;
-+              struct work_struct wdt_work;
-+      } cores[NPU_NUM_CORES];
-+
-+      struct {
-+              int (*ppe_init)(struct airoha_npu *npu);
-+              int (*ppe_deinit)(struct airoha_npu *npu);
-+              int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
-+                                            dma_addr_t foe_addr,
-+                                            int sram_num_entries);
-+              int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
-+                                          dma_addr_t foe_addr,
-+                                          u32 entry_size, u32 hash,
-+                                          bool ppe2);
-+      } ops;
-+};
-+
-+struct airoha_npu *airoha_npu_get(struct device *dev);
-+void airoha_npu_put(struct airoha_npu *npu);
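For orientation, the NPU interface introduced above reduces to acquiring a handle with airoha_npu_get(), calling through the ops table, and dropping the reference with airoha_npu_put(). A minimal consumer sketch, assuming a caller that already owns a DMA-mapped FOE table; the function name and parameters are illustrative only, mirroring what airoha_ppe_offload_setup() does in the flowtable patch further down:

    #include <linux/err.h>
    #include <linux/types.h>
    #include "airoha_npu.h"

    /* Illustrative only: bring up the PPE block through the NPU ops table
     * and drop the NPU reference again if initialisation fails.
     */
    static int example_ppe_bringup(struct device *dev, dma_addr_t foe_dma,
                                   int sram_num_entries)
    {
            struct airoha_npu *npu = airoha_npu_get(dev);
            int err;

            if (IS_ERR(npu))
                    return PTR_ERR(npu);

            err = npu->ops.ppe_init(npu);
            if (!err)
                    err = npu->ops.ppe_flush_sram_entries(npu, foe_dma,
                                                          sram_num_entries);
            if (err)
                    airoha_npu_put(npu);

            return err;
    }
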
diff --git a/target/linux/airoha/patches-6.6/048-13-v6.15-net-airoha-Introduce-flowtable-offload-support.patch b/target/linux/airoha/patches-6.6/048-13-v6.15-net-airoha-Introduce-flowtable-offload-support.patch
deleted file mode 100644 (file)
index 225165f..0000000
+++ /dev/null
@@ -1,1481 +0,0 @@
-From 00a7678310fe3d3f408513e55d9a0b67f0db380f Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:21 +0100
-Subject: [PATCH 13/15] net: airoha: Introduce flowtable offload support
-
-Introduce netfilter flowtable integration in order to allow the airoha_eth
-driver to offload 5-tuple flower rules learned by the PPE module if the
-user accelerates them using an nft configuration similar to the one reported
-below:
-
-table inet filter {
-       flowtable ft {
-               hook ingress priority filter
-               devices = { lan1, lan2, lan3, lan4, eth1 }
-               flags offload;
-       }
-       chain forward {
-               type filter hook forward priority filter; policy accept;
-               meta l4proto { tcp, udp } flow add @ft
-       }
-}
-
-Tested-by: Sayantan Nandy <sayantan.nandy@airoha.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/Makefile      |   3 +-
- drivers/net/ethernet/airoha/airoha_eth.c  |  60 +-
- drivers/net/ethernet/airoha/airoha_eth.h  | 250 ++++++
- drivers/net/ethernet/airoha/airoha_ppe.c  | 901 ++++++++++++++++++++++
- drivers/net/ethernet/airoha/airoha_regs.h | 107 ++-
- 5 files changed, 1314 insertions(+), 7 deletions(-)
- create mode 100644 drivers/net/ethernet/airoha/airoha_ppe.c
-
---- a/drivers/net/ethernet/airoha/Makefile
-+++ b/drivers/net/ethernet/airoha/Makefile
-@@ -3,5 +3,6 @@
- # Airoha for the Mediatek SoCs built-in ethernet macs
- #
--obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
-+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
-+airoha-eth-y := airoha_eth.o airoha_ppe.o
- obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -8,7 +8,6 @@
- #include <linux/platform_device.h>
- #include <linux/tcp.h>
- #include <linux/u64_stats_sync.h>
--#include <net/dsa.h>
- #include <net/dst_metadata.h>
- #include <net/page_pool/helpers.h>
- #include <net/pkt_cls.h>
-@@ -619,6 +618,7 @@ static int airoha_qdma_rx_process(struct
-       while (done < budget) {
-               struct airoha_queue_entry *e = &q->entry[q->tail];
-               struct airoha_qdma_desc *desc = &q->desc[q->tail];
-+              u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
-               dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-               u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-               struct airoha_gdm_port *port;
-@@ -681,6 +681,15 @@ static int airoha_qdma_rx_process(struct
-                                                 &port->dsa_meta[sptag]->dst);
-               }
-+              hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
-+              if (hash != AIROHA_RXD4_FOE_ENTRY)
-+                      skb_set_hash(skb, jhash_1word(hash, 0),
-+                                   PKT_HASH_TYPE_L4);
-+
-+              reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
-+              if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-+                      airoha_ppe_check_skb(eth->ppe, hash);
-+
-               napi_gro_receive(&q->napi, skb);
-               done++;
-@@ -1302,6 +1311,10 @@ static int airoha_hw_init(struct platfor
-                       return err;
-       }
-+      err = airoha_ppe_init(eth);
-+      if (err)
-+              return err;
-+
-       set_bit(DEV_STATE_INITIALIZED, &eth->state);
-       return 0;
-@@ -2166,6 +2179,47 @@ static int airoha_tc_htb_alloc_leaf_queu
-       return 0;
- }
-+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
-+                                   struct flow_block_offload *f)
-+{
-+      flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
-+      static LIST_HEAD(block_cb_list);
-+      struct flow_block_cb *block_cb;
-+
-+      if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-+              return -EOPNOTSUPP;
-+
-+      f->driver_block_list = &block_cb_list;
-+      switch (f->command) {
-+      case FLOW_BLOCK_BIND:
-+              block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
-+              if (block_cb) {
-+                      flow_block_cb_incref(block_cb);
-+                      return 0;
-+              }
-+              block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
-+              if (IS_ERR(block_cb))
-+                      return PTR_ERR(block_cb);
-+
-+              flow_block_cb_incref(block_cb);
-+              flow_block_cb_add(block_cb, f);
-+              list_add_tail(&block_cb->driver_list, &block_cb_list);
-+              return 0;
-+      case FLOW_BLOCK_UNBIND:
-+              block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
-+              if (!block_cb)
-+                      return -ENOENT;
-+
-+              if (!flow_block_cb_decref(block_cb)) {
-+                      flow_block_cb_remove(block_cb, f);
-+                      list_del(&block_cb->driver_list);
-+              }
-+              return 0;
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
- static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
- {
-       struct net_device *dev = port->dev;
-@@ -2249,6 +2303,9 @@ static int airoha_dev_tc_setup(struct ne
-               return airoha_tc_setup_qdisc_ets(port, type_data);
-       case TC_SETUP_QDISC_HTB:
-               return airoha_tc_setup_qdisc_htb(port, type_data);
-+      case TC_SETUP_BLOCK:
-+      case TC_SETUP_FT:
-+              return airoha_dev_setup_tc_block(port, type_data);
-       default:
-               return -EOPNOTSUPP;
-       }
-@@ -2505,6 +2562,7 @@ static void airoha_remove(struct platfor
-       }
-       free_netdev(eth->napi_dev);
-+      airoha_ppe_deinit(eth);
-       platform_set_drvdata(pdev, NULL);
- }
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -12,6 +12,7 @@
- #include <linux/kernel.h>
- #include <linux/netdevice.h>
- #include <linux/reset.h>
-+#include <net/dsa.h>
- #define AIROHA_MAX_NUM_GDM_PORTS      4
- #define AIROHA_MAX_NUM_QDMA           2
-@@ -44,6 +45,15 @@
- #define QDMA_METER_IDX(_n)            ((_n) & 0xff)
- #define QDMA_METER_GROUP(_n)          (((_n) >> 8) & 0x3)
-+#define PPE_NUM                               2
-+#define PPE1_SRAM_NUM_ENTRIES         (8 * 1024)
-+#define PPE_SRAM_NUM_ENTRIES          (2 * PPE1_SRAM_NUM_ENTRIES)
-+#define PPE_DRAM_NUM_ENTRIES          (16 * 1024)
-+#define PPE_NUM_ENTRIES                       (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
-+#define PPE_HASH_MASK                 (PPE_NUM_ENTRIES - 1)
-+#define PPE_ENTRY_SIZE                        80
-+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
-+
- #define MTK_HDR_LEN                   4
- #define MTK_HDR_XMIT_TAGGED_TPID_8100 1
- #define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
-@@ -195,6 +205,224 @@ struct airoha_hw_stats {
-       u64 rx_len[7];
- };
-+enum {
-+      PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
-+};
-+
-+enum {
-+      AIROHA_FOE_STATE_INVALID,
-+      AIROHA_FOE_STATE_UNBIND,
-+      AIROHA_FOE_STATE_BIND,
-+      AIROHA_FOE_STATE_FIN
-+};
-+
-+enum {
-+      PPE_PKT_TYPE_IPV4_HNAPT = 0,
-+      PPE_PKT_TYPE_IPV4_ROUTE = 1,
-+      PPE_PKT_TYPE_BRIDGE = 2,
-+      PPE_PKT_TYPE_IPV4_DSLITE = 3,
-+      PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
-+      PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
-+      PPE_PKT_TYPE_IPV6_6RD = 7,
-+};
-+
-+#define AIROHA_FOE_MAC_SMAC_ID                GENMASK(20, 16)
-+#define AIROHA_FOE_MAC_PPPOE_ID               GENMASK(15, 0)
-+
-+struct airoha_foe_mac_info_common {
-+      u16 vlan1;
-+      u16 etype;
-+
-+      u32 dest_mac_hi;
-+
-+      u16 vlan2;
-+      u16 dest_mac_lo;
-+
-+      u32 src_mac_hi;
-+};
-+
-+struct airoha_foe_mac_info {
-+      struct airoha_foe_mac_info_common common;
-+
-+      u16 pppoe_id;
-+      u16 src_mac_lo;
-+};
-+
-+#define AIROHA_FOE_IB1_UNBIND_PREBIND         BIT(24)
-+#define AIROHA_FOE_IB1_UNBIND_PACKETS         GENMASK(23, 8)
-+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP               GENMASK(7, 0)
-+
-+#define AIROHA_FOE_IB1_BIND_STATIC            BIT(31)
-+#define AIROHA_FOE_IB1_BIND_UDP                       BIT(30)
-+#define AIROHA_FOE_IB1_BIND_STATE             GENMASK(29, 28)
-+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE               GENMASK(27, 25)
-+#define AIROHA_FOE_IB1_BIND_TTL                       BIT(24)
-+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP      BIT(23)
-+#define AIROHA_FOE_IB1_BIND_PPPOE             BIT(22)
-+#define AIROHA_FOE_IB1_BIND_VPM                       GENMASK(21, 20)
-+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER                GENMASK(19, 16)
-+#define AIROHA_FOE_IB1_BIND_KEEPALIVE         BIT(15)
-+#define AIROHA_FOE_IB1_BIND_TIMESTAMP         GENMASK(14, 0)
-+
-+#define AIROHA_FOE_IB2_DSCP                   GENMASK(31, 24)
-+#define AIROHA_FOE_IB2_PORT_AG                        GENMASK(23, 13)
-+#define AIROHA_FOE_IB2_PCP                    BIT(12)
-+#define AIROHA_FOE_IB2_MULTICAST              BIT(11)
-+#define AIROHA_FOE_IB2_FAST_PATH              BIT(10)
-+#define AIROHA_FOE_IB2_PSE_QOS                        BIT(9)
-+#define AIROHA_FOE_IB2_PSE_PORT                       GENMASK(8, 5)
-+#define AIROHA_FOE_IB2_NBQ                    GENMASK(4, 0)
-+
-+#define AIROHA_FOE_ACTDP                      GENMASK(31, 24)
-+#define AIROHA_FOE_SHAPER_ID                  GENMASK(23, 16)
-+#define AIROHA_FOE_CHANNEL                    GENMASK(15, 11)
-+#define AIROHA_FOE_QID                                GENMASK(10, 8)
-+#define AIROHA_FOE_DPI                                BIT(7)
-+#define AIROHA_FOE_TUNNEL                     BIT(6)
-+#define AIROHA_FOE_TUNNEL_ID                  GENMASK(5, 0)
-+
-+struct airoha_foe_bridge {
-+      u32 dest_mac_hi;
-+
-+      u16 src_mac_hi;
-+      u16 dest_mac_lo;
-+
-+      u32 src_mac_lo;
-+
-+      u32 ib2;
-+
-+      u32 rsv[5];
-+
-+      u32 data;
-+
-+      struct airoha_foe_mac_info l2;
-+};
-+
-+struct airoha_foe_ipv4_tuple {
-+      u32 src_ip;
-+      u32 dest_ip;
-+      union {
-+              struct {
-+                      u16 dest_port;
-+                      u16 src_port;
-+              };
-+              struct {
-+                      u8 protocol;
-+                      u8 _pad[3]; /* fill with 0xa5a5a5 */
-+              };
-+              u32 ports;
-+      };
-+};
-+
-+struct airoha_foe_ipv4 {
-+      struct airoha_foe_ipv4_tuple orig_tuple;
-+
-+      u32 ib2;
-+
-+      struct airoha_foe_ipv4_tuple new_tuple;
-+
-+      u32 rsv[2];
-+
-+      u32 data;
-+
-+      struct airoha_foe_mac_info l2;
-+};
-+
-+struct airoha_foe_ipv4_dslite {
-+      struct airoha_foe_ipv4_tuple ip4;
-+
-+      u32 ib2;
-+
-+      u8 flow_label[3];
-+      u8 priority;
-+
-+      u32 rsv[4];
-+
-+      u32 data;
-+
-+      struct airoha_foe_mac_info l2;
-+};
-+
-+struct airoha_foe_ipv6 {
-+      u32 src_ip[4];
-+      u32 dest_ip[4];
-+
-+      union {
-+              struct {
-+                      u16 dest_port;
-+                      u16 src_port;
-+              };
-+              struct {
-+                      u8 protocol;
-+                      u8 pad[3];
-+              };
-+              u32 ports;
-+      };
-+
-+      u32 data;
-+
-+      u32 ib2;
-+
-+      struct airoha_foe_mac_info_common l2;
-+};
-+
-+struct airoha_foe_entry {
-+      union {
-+              struct {
-+                      u32 ib1;
-+                      union {
-+                              struct airoha_foe_bridge bridge;
-+                              struct airoha_foe_ipv4 ipv4;
-+                              struct airoha_foe_ipv4_dslite dslite;
-+                              struct airoha_foe_ipv6 ipv6;
-+                              DECLARE_FLEX_ARRAY(u32, d);
-+                      };
-+              };
-+              u8 data[PPE_ENTRY_SIZE];
-+      };
-+};
-+
-+struct airoha_flow_data {
-+      struct ethhdr eth;
-+
-+      union {
-+              struct {
-+                      __be32 src_addr;
-+                      __be32 dst_addr;
-+              } v4;
-+
-+              struct {
-+                      struct in6_addr src_addr;
-+                      struct in6_addr dst_addr;
-+              } v6;
-+      };
-+
-+      __be16 src_port;
-+      __be16 dst_port;
-+
-+      struct {
-+              struct {
-+                      u16 id;
-+                      __be16 proto;
-+              } hdr[2];
-+              u8 num;
-+      } vlan;
-+      struct {
-+              u16 sid;
-+              u8 num;
-+      } pppoe;
-+};
-+
-+struct airoha_flow_table_entry {
-+      struct hlist_node list;
-+
-+      struct airoha_foe_entry data;
-+      u32 hash;
-+
-+      struct rhash_head node;
-+      unsigned long cookie;
-+};
-+
- struct airoha_qdma {
-       struct airoha_eth *eth;
-       void __iomem *regs;
-@@ -234,6 +462,19 @@ struct airoha_gdm_port {
-       struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
- };
-+#define AIROHA_RXD4_PPE_CPU_REASON    GENMASK(20, 16)
-+#define AIROHA_RXD4_FOE_ENTRY         GENMASK(15, 0)
-+
-+struct airoha_ppe {
-+      struct airoha_eth *eth;
-+
-+      void *foe;
-+      dma_addr_t foe_dma;
-+
-+      struct hlist_head *foe_flow;
-+      u16 foe_check_time[PPE_NUM_ENTRIES];
-+};
-+
- struct airoha_eth {
-       struct device *dev;
-@@ -242,6 +483,9 @@ struct airoha_eth {
-       struct airoha_npu __rcu *npu;
-+      struct airoha_ppe *ppe;
-+      struct rhashtable flow_table;
-+
-       struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-       struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-@@ -277,4 +521,10 @@ u32 airoha_rmw(void __iomem *base, u32 o
- #define airoha_qdma_clear(qdma, offset, val)                  \
-       airoha_rmw((qdma)->regs, (offset), (val), 0)
-+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
-+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-+                               void *cb_priv);
-+int airoha_ppe_init(struct airoha_eth *eth);
-+void airoha_ppe_deinit(struct airoha_eth *eth);
-+
- #endif /* AIROHA_ETH_H */
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -0,0 +1,901 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2025 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#include <linux/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/rhashtable.h>
-+#include <net/ipv6.h>
-+#include <net/pkt_cls.h>
-+
-+#include "airoha_npu.h"
-+#include "airoha_regs.h"
-+#include "airoha_eth.h"
-+
-+static DEFINE_MUTEX(flow_offload_mutex);
-+static DEFINE_SPINLOCK(ppe_lock);
-+
-+static const struct rhashtable_params airoha_flow_table_params = {
-+      .head_offset = offsetof(struct airoha_flow_table_entry, node),
-+      .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
-+      .key_len = sizeof(unsigned long),
-+      .automatic_shrinking = true,
-+};
-+
-+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
-+{
-+      return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
-+}
-+
-+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
-+{
-+      u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
-+
-+      return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
-+}
-+
-+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
-+{
-+      u32 sram_tb_size, sram_num_entries, dram_num_entries;
-+      struct airoha_eth *eth = ppe->eth;
-+      int i;
-+
-+      sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
-+      dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
-+
-+      for (i = 0; i < PPE_NUM; i++) {
-+              int p;
-+
-+              airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
-+                           ppe->foe_dma + sram_tb_size);
-+
-+              airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
-+                            PPE_BIND_AGE0_DELTA_NON_L4 |
-+                            PPE_BIND_AGE0_DELTA_UDP,
-+                            FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
-+                            FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
-+              airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
-+                            PPE_BIND_AGE1_DELTA_TCP_FIN |
-+                            PPE_BIND_AGE1_DELTA_TCP,
-+                            FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
-+                            FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
-+
-+              airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
-+                            PPE_SRAM_TABLE_EN_MASK |
-+                            PPE_SRAM_HASH1_EN_MASK |
-+                            PPE_DRAM_TABLE_EN_MASK |
-+                            PPE_SRAM_HASH0_MODE_MASK |
-+                            PPE_SRAM_HASH1_MODE_MASK |
-+                            PPE_DRAM_HASH0_MODE_MASK |
-+                            PPE_DRAM_HASH1_MODE_MASK,
-+                            FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
-+                            FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
-+                            FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
-+                            FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
-+
-+              airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
-+                            PPE_TB_CFG_SEARCH_MISS_MASK |
-+                            PPE_TB_ENTRY_SIZE_MASK,
-+                            FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
-+                            FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
-+
-+              airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
-+
-+              for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
-+                      airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
-+                                    FP0_EGRESS_MTU_MASK |
-+                                    FP1_EGRESS_MTU_MASK,
-+                                    FIELD_PREP(FP0_EGRESS_MTU_MASK,
-+                                               AIROHA_MAX_MTU) |
-+                                    FIELD_PREP(FP1_EGRESS_MTU_MASK,
-+                                               AIROHA_MAX_MTU));
-+      }
-+
-+      if (airoha_ppe2_is_enabled(eth)) {
-+              sram_num_entries =
-+                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
-+              airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-+                            PPE_SRAM_TB_NUM_ENTRY_MASK |
-+                            PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                            FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-+                                       sram_num_entries) |
-+                            FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                                       dram_num_entries));
-+              airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
-+                            PPE_SRAM_TB_NUM_ENTRY_MASK |
-+                            PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                            FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-+                                       sram_num_entries) |
-+                            FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                                       dram_num_entries));
-+      } else {
-+              sram_num_entries =
-+                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
-+              airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-+                            PPE_SRAM_TB_NUM_ENTRY_MASK |
-+                            PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                            FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-+                                       sram_num_entries) |
-+                            FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-+                                       dram_num_entries));
-+      }
-+}
-+
-+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
-+{
-+      void *dest = eth + act->mangle.offset;
-+      const void *src = &act->mangle.val;
-+
-+      if (act->mangle.offset > 8)
-+              return;
-+
-+      if (act->mangle.mask == 0xffff) {
-+              src += 2;
-+              dest += 2;
-+      }
-+
-+      memcpy(dest, src, act->mangle.mask ? 2 : 4);
-+}
-+
-+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
-+                                      struct airoha_flow_data *data)
-+{
-+      u32 val = be32_to_cpu((__force __be32)act->mangle.val);
-+
-+      switch (act->mangle.offset) {
-+      case 0:
-+              if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
-+                      data->dst_port = cpu_to_be16(val);
-+              else
-+                      data->src_port = cpu_to_be16(val >> 16);
-+              break;
-+      case 2:
-+              data->dst_port = cpu_to_be16(val);
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
-+                                     struct airoha_flow_data *data)
-+{
-+      __be32 *dest;
-+
-+      switch (act->mangle.offset) {
-+      case offsetof(struct iphdr, saddr):
-+              dest = &data->v4.src_addr;
-+              break;
-+      case offsetof(struct iphdr, daddr):
-+              dest = &data->v4.dst_addr;
-+              break;
-+      default:
-+              return -EINVAL;
-+      }
-+
-+      memcpy(dest, &act->mangle.val, sizeof(u32));
-+
-+      return 0;
-+}
-+
-+static int airoha_get_dsa_port(struct net_device **dev)
-+{
-+#if IS_ENABLED(CONFIG_NET_DSA)
-+      struct dsa_port *dp = dsa_port_from_netdev(*dev);
-+
-+      if (IS_ERR(dp))
-+              return -ENODEV;
-+
-+      *dev = dsa_port_to_master(dp);
-+      return dp->index;
-+#else
-+      return -ENODEV;
-+#endif
-+}
-+
-+static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
-+                                      struct net_device *dev, int type,
-+                                      struct airoha_flow_data *data,
-+                                      int l4proto)
-+{
-+      int dsa_port = airoha_get_dsa_port(&dev);
-+      struct airoha_foe_mac_info_common *l2;
-+      u32 qdata, ports_pad, val;
-+
-+      memset(hwe, 0, sizeof(*hwe));
-+
-+      val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
-+            FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
-+            FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
-+            FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
-+            FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
-+            AIROHA_FOE_IB1_BIND_TTL;
-+      hwe->ib1 = val;
-+
-+      val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
-+      if (dsa_port >= 0)
-+              val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
-+
-+      if (dev) {
-+              struct airoha_gdm_port *port = netdev_priv(dev);
-+              u8 pse_port;
-+
-+              pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-+              val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
-+      }
-+
-+      /* FIXME: implement QoS support setting pse_port to 2 (loopback)
-+       * for uplink and setting qos bit in ib2
-+       */
-+
-+      if (is_multicast_ether_addr(data->eth.h_dest))
-+              val |= AIROHA_FOE_IB2_MULTICAST;
-+
-+      ports_pad = 0xa5a5a500 | (l4proto & 0xff);
-+      if (type == PPE_PKT_TYPE_IPV4_ROUTE)
-+              hwe->ipv4.orig_tuple.ports = ports_pad;
-+      if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
-+              hwe->ipv6.ports = ports_pad;
-+
-+      qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
-+      if (type == PPE_PKT_TYPE_BRIDGE) {
-+              hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
-+              hwe->bridge.dest_mac_lo =
-+                      get_unaligned_be16(data->eth.h_dest + 4);
-+              hwe->bridge.src_mac_hi =
-+                      get_unaligned_be16(data->eth.h_source);
-+              hwe->bridge.src_mac_lo =
-+                      get_unaligned_be32(data->eth.h_source + 2);
-+              hwe->bridge.data = qdata;
-+              hwe->bridge.ib2 = val;
-+              l2 = &hwe->bridge.l2.common;
-+      } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-+              hwe->ipv6.data = qdata;
-+              hwe->ipv6.ib2 = val;
-+              l2 = &hwe->ipv6.l2;
-+      } else {
-+              hwe->ipv4.data = qdata;
-+              hwe->ipv4.ib2 = val;
-+              l2 = &hwe->ipv4.l2.common;
-+      }
-+
-+      l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
-+      l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
-+      if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
-+              l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
-+              hwe->ipv4.l2.src_mac_lo =
-+                      get_unaligned_be16(data->eth.h_source + 4);
-+      } else {
-+              l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
-+      }
-+
-+      if (data->vlan.num) {
-+              l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
-+              l2->vlan1 = data->vlan.hdr[0].id;
-+              if (data->vlan.num == 2)
-+                      l2->vlan2 = data->vlan.hdr[1].id;
-+      } else if (dsa_port >= 0) {
-+              l2->etype = BIT(15) | BIT(dsa_port);
-+      } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-+              l2->etype = ETH_P_IPV6;
-+      } else {
-+              l2->etype = ETH_P_IP;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
-+                                             struct airoha_flow_data *data,
-+                                             bool egress)
-+{
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
-+      struct airoha_foe_ipv4_tuple *t;
-+
-+      switch (type) {
-+      case PPE_PKT_TYPE_IPV4_HNAPT:
-+              if (egress) {
-+                      t = &hwe->ipv4.new_tuple;
-+                      break;
-+              }
-+              fallthrough;
-+      case PPE_PKT_TYPE_IPV4_DSLITE:
-+      case PPE_PKT_TYPE_IPV4_ROUTE:
-+              t = &hwe->ipv4.orig_tuple;
-+              break;
-+      default:
-+              WARN_ON_ONCE(1);
-+              return -EINVAL;
-+      }
-+
-+      t->src_ip = be32_to_cpu(data->v4.src_addr);
-+      t->dest_ip = be32_to_cpu(data->v4.dst_addr);
-+
-+      if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
-+              t->src_port = be16_to_cpu(data->src_port);
-+              t->dest_port = be16_to_cpu(data->dst_port);
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
-+                                             struct airoha_flow_data *data)
-+
-+{
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
-+      u32 *src, *dest;
-+
-+      switch (type) {
-+      case PPE_PKT_TYPE_IPV6_ROUTE_5T:
-+      case PPE_PKT_TYPE_IPV6_6RD:
-+              hwe->ipv6.src_port = be16_to_cpu(data->src_port);
-+              hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
-+              fallthrough;
-+      case PPE_PKT_TYPE_IPV6_ROUTE_3T:
-+              src = hwe->ipv6.src_ip;
-+              dest = hwe->ipv6.dest_ip;
-+              break;
-+      default:
-+              WARN_ON_ONCE(1);
-+              return -EINVAL;
-+      }
-+
-+      ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
-+      ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
-+
-+      return 0;
-+}
-+
-+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
-+{
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
-+      u32 hash, hv1, hv2, hv3;
-+
-+      switch (type) {
-+      case PPE_PKT_TYPE_IPV4_ROUTE:
-+      case PPE_PKT_TYPE_IPV4_HNAPT:
-+              hv1 = hwe->ipv4.orig_tuple.ports;
-+              hv2 = hwe->ipv4.orig_tuple.dest_ip;
-+              hv3 = hwe->ipv4.orig_tuple.src_ip;
-+              break;
-+      case PPE_PKT_TYPE_IPV6_ROUTE_3T:
-+      case PPE_PKT_TYPE_IPV6_ROUTE_5T:
-+              hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
-+              hv1 ^= hwe->ipv6.ports;
-+
-+              hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
-+              hv2 ^= hwe->ipv6.dest_ip[0];
-+
-+              hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
-+              hv3 ^= hwe->ipv6.src_ip[0];
-+              break;
-+      case PPE_PKT_TYPE_IPV4_DSLITE:
-+      case PPE_PKT_TYPE_IPV6_6RD:
-+      default:
-+              WARN_ON_ONCE(1);
-+              return PPE_HASH_MASK;
-+      }
-+
-+      hash = (hv1 & hv2) | ((~hv1) & hv3);
-+      hash = (hash >> 24) | ((hash & 0xffffff) << 8);
-+      hash ^= hv1 ^ hv2 ^ hv3;
-+      hash ^= hash >> 16;
-+      hash &= PPE_NUM_ENTRIES - 1;
-+
-+      return hash;
-+}
-+
-+static struct airoha_foe_entry *
-+airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash)
-+{
-+      if (hash < PPE_SRAM_NUM_ENTRIES) {
-+              u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
-+              struct airoha_eth *eth = ppe->eth;
-+              bool ppe2;
-+              u32 val;
-+              int i;
-+
-+              ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
-+                     hash >= PPE1_SRAM_NUM_ENTRIES;
-+              airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
-+                           FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
-+                           PPE_SRAM_CTRL_REQ_MASK);
-+              if (read_poll_timeout_atomic(airoha_fe_rr, val,
-+                                           val & PPE_SRAM_CTRL_ACK_MASK,
-+                                           10, 100, false, eth,
-+                                           REG_PPE_RAM_CTRL(ppe2)))
-+                      return NULL;
-+
-+              for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
-+                      hwe[i] = airoha_fe_rr(eth,
-+                                            REG_PPE_RAM_ENTRY(ppe2, i));
-+      }
-+
-+      return ppe->foe + hash * sizeof(struct airoha_foe_entry);
-+}
-+
-+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
-+                                       struct airoha_foe_entry *hwe)
-+{
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
-+      int len;
-+
-+      if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
-+              return false;
-+
-+      if (type > PPE_PKT_TYPE_IPV4_DSLITE)
-+              len = offsetof(struct airoha_foe_entry, ipv6.data);
-+      else
-+              len = offsetof(struct airoha_foe_entry, ipv4.ib2);
-+
-+      return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
-+}
-+
-+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
-+                                     struct airoha_foe_entry *e,
-+                                     u32 hash)
-+{
-+      struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
-+      u32 ts = airoha_ppe_get_timestamp(ppe);
-+      struct airoha_eth *eth = ppe->eth;
-+
-+      memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
-+      wmb();
-+
-+      e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
-+      e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
-+      hwe->ib1 = e->ib1;
-+
-+      if (hash < PPE_SRAM_NUM_ENTRIES) {
-+              dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
-+              bool ppe2 = airoha_ppe2_is_enabled(eth) &&
-+                          hash >= PPE1_SRAM_NUM_ENTRIES;
-+              struct airoha_npu *npu;
-+              int err = -ENODEV;
-+
-+              rcu_read_lock();
-+              npu = rcu_dereference(eth->npu);
-+              if (npu)
-+                      err = npu->ops.ppe_foe_commit_entry(npu, addr,
-+                                                          sizeof(*hwe), hash,
-+                                                          ppe2);
-+              rcu_read_unlock();
-+
-+              return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
-+{
-+      struct airoha_flow_table_entry *e;
-+      struct airoha_foe_entry *hwe;
-+      struct hlist_node *n;
-+      u32 index, state;
-+
-+      spin_lock_bh(&ppe_lock);
-+
-+      hwe = airoha_ppe_foe_get_entry(ppe, hash);
-+      if (!hwe)
-+              goto unlock;
-+
-+      state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
-+      if (state == AIROHA_FOE_STATE_BIND)
-+              goto unlock;
-+
-+      index = airoha_ppe_foe_get_entry_hash(hwe);
-+      hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
-+              if (airoha_ppe_foe_compare_entry(e, hwe)) {
-+                      airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
-+                      e->hash = hash;
-+                      break;
-+              }
-+      }
-+unlock:
-+      spin_unlock_bh(&ppe_lock);
-+}
-+
-+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
-+                                          struct airoha_flow_table_entry *e)
-+{
-+      u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
-+
-+      e->hash = 0xffff;
-+
-+      spin_lock_bh(&ppe_lock);
-+      hlist_add_head(&e->list, &ppe->foe_flow[hash]);
-+      spin_unlock_bh(&ppe_lock);
-+
-+      return 0;
-+}
-+
-+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
-+                                           struct airoha_flow_table_entry *e)
-+{
-+      spin_lock_bh(&ppe_lock);
-+
-+      hlist_del_init(&e->list);
-+      if (e->hash != 0xffff) {
-+              e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
-+              e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
-+                                        AIROHA_FOE_STATE_INVALID);
-+              airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
-+              e->hash = 0xffff;
-+      }
-+
-+      spin_unlock_bh(&ppe_lock);
-+}
-+
-+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
-+                                         struct flow_cls_offload *f)
-+{
-+      struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-+      struct airoha_eth *eth = port->qdma->eth;
-+      struct airoha_flow_table_entry *e;
-+      struct airoha_flow_data data = {};
-+      struct net_device *odev = NULL;
-+      struct flow_action_entry *act;
-+      struct airoha_foe_entry hwe;
-+      int err, i, offload_type;
-+      u16 addr_type = 0;
-+      u8 l4proto = 0;
-+
-+      if (rhashtable_lookup(&eth->flow_table, &f->cookie,
-+                            airoha_flow_table_params))
-+              return -EEXIST;
-+
-+      if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
-+              return -EOPNOTSUPP;
-+
-+      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
-+              struct flow_match_control match;
-+
-+              flow_rule_match_control(rule, &match);
-+              addr_type = match.key->addr_type;
-+              if (flow_rule_has_control_flags(match.mask->flags,
-+                                              f->common.extack))
-+                      return -EOPNOTSUPP;
-+      } else {
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
-+              struct flow_match_basic match;
-+
-+              flow_rule_match_basic(rule, &match);
-+              l4proto = match.key->ip_proto;
-+      } else {
-+              return -EOPNOTSUPP;
-+      }
-+
-+      switch (addr_type) {
-+      case 0:
-+              offload_type = PPE_PKT_TYPE_BRIDGE;
-+              if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-+                      struct flow_match_eth_addrs match;
-+
-+                      flow_rule_match_eth_addrs(rule, &match);
-+                      memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
-+                      memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
-+              } else {
-+                      return -EOPNOTSUPP;
-+              }
-+              break;
-+      case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-+              offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
-+              break;
-+      case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-+              offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
-+              break;
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+
-+      flow_action_for_each(i, act, &rule->action) {
-+              switch (act->id) {
-+              case FLOW_ACTION_MANGLE:
-+                      if (offload_type == PPE_PKT_TYPE_BRIDGE)
-+                              return -EOPNOTSUPP;
-+
-+                      if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
-+                              airoha_ppe_flow_mangle_eth(act, &data.eth);
-+                      break;
-+              case FLOW_ACTION_REDIRECT:
-+                      odev = act->dev;
-+                      break;
-+              case FLOW_ACTION_CSUM:
-+                      break;
-+              case FLOW_ACTION_VLAN_PUSH:
-+                      if (data.vlan.num == 2 ||
-+                          act->vlan.proto != htons(ETH_P_8021Q))
-+                              return -EOPNOTSUPP;
-+
-+                      data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
-+                      data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
-+                      data.vlan.num++;
-+                      break;
-+              case FLOW_ACTION_VLAN_POP:
-+                      break;
-+              case FLOW_ACTION_PPPOE_PUSH:
-+                      break;
-+              default:
-+                      return -EOPNOTSUPP;
-+              }
-+      }
-+
-+      if (!is_valid_ether_addr(data.eth.h_source) ||
-+          !is_valid_ether_addr(data.eth.h_dest))
-+              return -EINVAL;
-+
-+      err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
-+                                         &data, l4proto);
-+      if (err)
-+              return err;
-+
-+      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
-+              struct flow_match_ports ports;
-+
-+              if (offload_type == PPE_PKT_TYPE_BRIDGE)
-+                      return -EOPNOTSUPP;
-+
-+              flow_rule_match_ports(rule, &ports);
-+              data.src_port = ports.key->src;
-+              data.dst_port = ports.key->dst;
-+      } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-+              struct flow_match_ipv4_addrs addrs;
-+
-+              flow_rule_match_ipv4_addrs(rule, &addrs);
-+              data.v4.src_addr = addrs.key->src;
-+              data.v4.dst_addr = addrs.key->dst;
-+              airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
-+      }
-+
-+      if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-+              struct flow_match_ipv6_addrs addrs;
-+
-+              flow_rule_match_ipv6_addrs(rule, &addrs);
-+
-+              data.v6.src_addr = addrs.key->src;
-+              data.v6.dst_addr = addrs.key->dst;
-+              airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
-+      }
-+
-+      flow_action_for_each(i, act, &rule->action) {
-+              if (act->id != FLOW_ACTION_MANGLE)
-+                      continue;
-+
-+              if (offload_type == PPE_PKT_TYPE_BRIDGE)
-+                      return -EOPNOTSUPP;
-+
-+              switch (act->mangle.htype) {
-+              case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
-+              case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
-+                      err = airoha_ppe_flow_mangle_ports(act, &data);
-+                      break;
-+              case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
-+                      err = airoha_ppe_flow_mangle_ipv4(act, &data);
-+                      break;
-+              case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
-+                      /* handled earlier */
-+                      break;
-+              default:
-+                      return -EOPNOTSUPP;
-+              }
-+
-+              if (err)
-+                      return err;
-+      }
-+
-+      if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-+              err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
-+              if (err)
-+                      return err;
-+      }
-+
-+      e = kzalloc(sizeof(*e), GFP_KERNEL);
-+      if (!e)
-+              return -ENOMEM;
-+
-+      e->cookie = f->cookie;
-+      memcpy(&e->data, &hwe, sizeof(e->data));
-+
-+      err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
-+      if (err)
-+              goto free_entry;
-+
-+      err = rhashtable_insert_fast(&eth->flow_table, &e->node,
-+                                   airoha_flow_table_params);
-+      if (err < 0)
-+              goto remove_foe_entry;
-+
-+      return 0;
-+
-+remove_foe_entry:
-+      airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
-+free_entry:
-+      kfree(e);
-+
-+      return err;
-+}
-+
-+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
-+                                         struct flow_cls_offload *f)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      struct airoha_flow_table_entry *e;
-+
-+      e = rhashtable_lookup(&eth->flow_table, &f->cookie,
-+                            airoha_flow_table_params);
-+      if (!e)
-+              return -ENOENT;
-+
-+      airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
-+      rhashtable_remove_fast(&eth->flow_table, &e->node,
-+                             airoha_flow_table_params);
-+      kfree(e);
-+
-+      return 0;
-+}
-+
-+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
-+                                     struct flow_cls_offload *f)
-+{
-+      switch (f->command) {
-+      case FLOW_CLS_REPLACE:
-+              return airoha_ppe_flow_offload_replace(port, f);
-+      case FLOW_CLS_DESTROY:
-+              return airoha_ppe_flow_offload_destroy(port, f);
-+      default:
-+              break;
-+      }
-+
-+      return -EOPNOTSUPP;
-+}
-+
-+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
-+                                       struct airoha_npu *npu)
-+{
-+      int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
-+      struct airoha_foe_entry *hwe = ppe->foe;
-+
-+      if (airoha_ppe2_is_enabled(ppe->eth))
-+              sram_num_entries = sram_num_entries / 2;
-+
-+      for (i = 0; i < sram_num_entries; i++)
-+              memset(&hwe[i], 0, sizeof(*hwe));
-+
-+      return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
-+                                             PPE_SRAM_NUM_ENTRIES);
-+}
-+
-+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
-+{
-+      struct airoha_npu *npu = airoha_npu_get(eth->dev);
-+
-+      if (IS_ERR(npu)) {
-+              request_module("airoha-npu");
-+              npu = airoha_npu_get(eth->dev);
-+      }
-+
-+      return npu;
-+}
-+
-+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
-+{
-+      struct airoha_npu *npu = airoha_ppe_npu_get(eth);
-+      int err;
-+
-+      if (IS_ERR(npu))
-+              return PTR_ERR(npu);
-+
-+      err = npu->ops.ppe_init(npu);
-+      if (err)
-+              goto error_npu_put;
-+
-+      airoha_ppe_hw_init(eth->ppe);
-+      err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
-+      if (err)
-+              goto error_npu_put;
-+
-+      rcu_assign_pointer(eth->npu, npu);
-+      synchronize_rcu();
-+
-+      return 0;
-+
-+error_npu_put:
-+      airoha_npu_put(npu);
-+
-+      return err;
-+}
-+
-+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-+                               void *cb_priv)
-+{
-+      struct flow_cls_offload *cls = type_data;
-+      struct net_device *dev = cb_priv;
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->qdma->eth;
-+      int err = 0;
-+
-+      if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
-+              return -EOPNOTSUPP;
-+
-+      mutex_lock(&flow_offload_mutex);
-+
-+      if (!eth->npu)
-+              err = airoha_ppe_offload_setup(eth);
-+      if (!err)
-+              err = airoha_ppe_flow_offload_cmd(port, cls);
-+
-+      mutex_unlock(&flow_offload_mutex);
-+
-+      return err;
-+}
-+
-+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
-+{
-+      u16 now, diff;
-+
-+      if (hash > PPE_HASH_MASK)
-+              return;
-+
-+      now = (u16)jiffies;
-+      diff = now - ppe->foe_check_time[hash];
-+      if (diff < HZ / 10)
-+              return;
-+
-+      ppe->foe_check_time[hash] = now;
-+      airoha_ppe_foe_insert_entry(ppe, hash);
-+}
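The HZ / 10 check above rate-limits FOE refreshes for a given hash bucket to
roughly ten per second, and the u16 arithmetic keeps the comparison correct
across jiffies wraparound. A minimal standalone sketch of the same pattern
(illustrative only; "last" stands in for the stored per-hash timestamp,
ppe->foe_check_time[hash] in the function above):

	u16 now = (u16)jiffies;

	if ((u16)(now - last) < HZ / 10)
		return;		/* checked less than HZ/10 ticks (~100 ms) ago */
	last = now;		/* record the time of this check */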
-+
-+int airoha_ppe_init(struct airoha_eth *eth)
-+{
-+      struct airoha_ppe *ppe;
-+      int foe_size;
-+
-+      ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
-+      if (!ppe)
-+              return -ENOMEM;
-+
-+      foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
-+      ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
-+                                     GFP_KERNEL);
-+      if (!ppe->foe)
-+              return -ENOMEM;
-+
-+      ppe->eth = eth;
-+      eth->ppe = ppe;
-+
-+      ppe->foe_flow = devm_kzalloc(eth->dev,
-+                                   PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
-+                                   GFP_KERNEL);
-+      if (!ppe->foe_flow)
-+              return -ENOMEM;
-+
-+      return rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
-+}
-+
-+void airoha_ppe_deinit(struct airoha_eth *eth)
-+{
-+      struct airoha_npu *npu;
-+
-+      rcu_read_lock();
-+      npu = rcu_dereference(eth->npu);
-+      if (npu) {
-+              npu->ops.ppe_deinit(npu);
-+              airoha_npu_put(npu);
-+      }
-+      rcu_read_unlock();
-+
-+      rhashtable_destroy(&eth->flow_table);
-+}
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -15,6 +15,7 @@
- #define CDM1_BASE                     0x0400
- #define GDM1_BASE                     0x0500
- #define PPE1_BASE                     0x0c00
-+#define PPE2_BASE                     0x1c00
- #define CDM2_BASE                     0x1400
- #define GDM2_BASE                     0x1500
-@@ -36,6 +37,7 @@
- #define FE_RST_GDM3_MBI_ARB_MASK      BIT(2)
- #define FE_RST_CORE_MASK              BIT(0)
-+#define REG_FE_FOE_TS                 0x0010
- #define REG_FE_WAN_MAC_H              0x0030
- #define REG_FE_LAN_MAC_H              0x0040
-@@ -192,11 +194,106 @@
- #define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)      (GDM_BASE(_n) + 0x198)
- #define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)     (GDM_BASE(_n) + 0x19c)
--#define REG_PPE1_TB_HASH_CFG          (PPE1_BASE + 0x250)
--#define PPE1_SRAM_TABLE_EN_MASK               BIT(0)
--#define PPE1_SRAM_HASH1_EN_MASK               BIT(8)
--#define PPE1_DRAM_TABLE_EN_MASK               BIT(16)
--#define PPE1_DRAM_HASH1_EN_MASK               BIT(24)
-+#define REG_PPE_GLO_CFG(_n)                   (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
-+#define PPE_GLO_CFG_BUSY_MASK                 BIT(31)
-+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK     BIT(9)
-+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK         BIT(6)
-+#define PPE_GLO_CFG_PPE_BSWAP_MASK            BIT(5)
-+#define PPE_GLO_CFG_TTL_DROP_MASK             BIT(4)
-+#define PPE_GLO_CFG_IP4_CS_DROP_MASK          BIT(3)
-+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK               BIT(2)
-+#define PPE_GLO_CFG_EN_MASK                   BIT(0)
-+
-+#define REG_PPE_PPE_FLOW_CFG(_n)              (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
-+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK    BIT(20)
-+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK    BIT(19)
-+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
-+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK                BIT(17)
-+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK  BIT(16)
-+#define PPE_FLOW_CFG_IP4_DSLITE_MASK          BIT(14)
-+#define PPE_FLOW_CFG_IP4_NAPT_MASK            BIT(13)
-+#define PPE_FLOW_CFG_IP4_NAT_MASK             BIT(12)
-+#define PPE_FLOW_CFG_IP6_6RD_MASK             BIT(10)
-+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK                BIT(9)
-+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK                BIT(8)
-+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK                BIT(7)
-+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK                BIT(6)
-+
-+#define REG_PPE_IP_PROTO_CHK(_n)              (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
-+#define PPE_IP_PROTO_CHK_IPV4_MASK            GENMASK(15, 0)
-+#define PPE_IP_PROTO_CHK_IPV6_MASK            GENMASK(31, 16)
-+
-+#define REG_PPE_TB_CFG(_n)                    (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
-+#define PPE_SRAM_TB_NUM_ENTRY_MASK            GENMASK(26, 24)
-+#define PPE_TB_CFG_KEEPALIVE_MASK             GENMASK(13, 12)
-+#define PPE_TB_CFG_AGE_TCP_FIN_MASK           BIT(11)
-+#define PPE_TB_CFG_AGE_UDP_MASK                       BIT(10)
-+#define PPE_TB_CFG_AGE_TCP_MASK                       BIT(9)
-+#define PPE_TB_CFG_AGE_UNBIND_MASK            BIT(8)
-+#define PPE_TB_CFG_AGE_NON_L4_MASK            BIT(7)
-+#define PPE_TB_CFG_AGE_PREBIND_MASK           BIT(6)
-+#define PPE_TB_CFG_SEARCH_MISS_MASK           GENMASK(5, 4)
-+#define PPE_TB_ENTRY_SIZE_MASK                        BIT(3)
-+#define PPE_DRAM_TB_NUM_ENTRY_MASK            GENMASK(2, 0)
-+
-+#define REG_PPE_TB_BASE(_n)                   (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
-+
-+#define REG_PPE_BIND_RATE(_n)                 (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
-+#define PPE_BIND_RATE_L2B_BIND_MASK           GENMASK(31, 16)
-+#define PPE_BIND_RATE_BIND_MASK                       GENMASK(15, 0)
-+
-+#define REG_PPE_BIND_LIMIT0(_n)                       (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
-+#define PPE_BIND_LIMIT0_HALF_MASK             GENMASK(29, 16)
-+#define PPE_BIND_LIMIT0_QUARTER_MASK          GENMASK(13, 0)
-+
-+#define REG_PPE_BIND_LIMIT1(_n)                       (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
-+#define PPE_BIND_LIMIT1_NON_L4_MASK           GENMASK(23, 16)
-+#define PPE_BIND_LIMIT1_FULL_MASK             GENMASK(13, 0)
-+
-+#define REG_PPE_BND_AGE0(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
-+#define PPE_BIND_AGE0_DELTA_NON_L4            GENMASK(30, 16)
-+#define PPE_BIND_AGE0_DELTA_UDP                       GENMASK(14, 0)
-+
-+#define REG_PPE_UNBIND_AGE(_n)                        (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
-+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK               GENMASK(31, 16)
-+#define PPE_UNBIND_AGE_DELTA_MASK             GENMASK(7, 0)
-+
-+#define REG_PPE_BND_AGE1(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
-+#define PPE_BIND_AGE1_DELTA_TCP_FIN           GENMASK(30, 16)
-+#define PPE_BIND_AGE1_DELTA_TCP                       GENMASK(14, 0)
-+
-+#define REG_PPE_HASH_SEED(_n)                 (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
-+#define PPE_HASH_SEED                         0x12345678
-+
-+#define REG_PPE_DFT_CPORT0(_n)                        (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
-+
-+#define REG_PPE_DFT_CPORT1(_n)                        (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
-+
-+#define REG_PPE_TB_HASH_CFG(_n)                       (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
-+#define PPE_DRAM_HASH1_MODE_MASK              GENMASK(31, 28)
-+#define PPE_DRAM_HASH1_EN_MASK                        BIT(24)
-+#define PPE_DRAM_HASH0_MODE_MASK              GENMASK(23, 20)
-+#define PPE_DRAM_TABLE_EN_MASK                        BIT(16)
-+#define PPE_SRAM_HASH1_MODE_MASK              GENMASK(15, 12)
-+#define PPE_SRAM_HASH1_EN_MASK                        BIT(8)
-+#define PPE_SRAM_HASH0_MODE_MASK              GENMASK(7, 4)
-+#define PPE_SRAM_TABLE_EN_MASK                        BIT(0)
-+
-+#define REG_PPE_MTU_BASE(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
-+#define REG_PPE_MTU(_m, _n)                   (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
-+#define FP1_EGRESS_MTU_MASK                   GENMASK(29, 16)
-+#define FP0_EGRESS_MTU_MASK                   GENMASK(13, 0)
-+
-+#define REG_PPE_RAM_CTRL(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
-+#define PPE_SRAM_CTRL_ACK_MASK                        BIT(31)
-+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK                BIT(30)
-+#define PPE_SRAM_CTRL_ENTRY_MASK              GENMASK(23, 8)
-+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK               BIT(2)
-+#define PPE_SRAM_CTRL_WR_MASK                 BIT(1)
-+#define PPE_SRAM_CTRL_REQ_MASK                        BIT(0)
-+
-+#define REG_PPE_RAM_BASE(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
-+#define REG_PPE_RAM_ENTRY(_m, _n)             (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
- #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
- #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
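All of the REG_PPE_*(_n) helpers added above pick the register bank with the
same ((_n) ? PPE2_BASE : PPE1_BASE) idiom. As a quick orientation, using the
PPE1_BASE (0x0c00) and PPE2_BASE (0x1c00) values defined earlier in the hunk:

	REG_PPE_TB_CFG(0) = 0x0c00 + 0x21c = 0x0e1c   /* PPE1 bank */
	REG_PPE_TB_CFG(1) = 0x1c00 + 0x21c = 0x1e1c   /* PPE2 bank */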
diff --git a/target/linux/airoha/patches-6.6/048-14-v6.15-net-airoha-Add-loopback-support-for-GDM2.patch b/target/linux/airoha/patches-6.6/048-14-v6.15-net-airoha-Add-loopback-support-for-GDM2.patch
deleted file mode 100644 (file)
index 224fe04..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-From 9cd451d414f6e29f507a216fb3b19fa68c011f8c Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:22 +0100
-Subject: [PATCH 14/15] net: airoha: Add loopback support for GDM2
-
-Enable hw redirection of traffic received on the GDM2 port to GDM{3,4}.
-This is required to apply Qdisc offloading (HTB or ETS) to traffic to
-and from the GDM{3,4} ports.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 71 ++++++++++++++++++++++-
- drivers/net/ethernet/airoha/airoha_eth.h  |  7 +++
- drivers/net/ethernet/airoha/airoha_ppe.c  | 12 ++--
- drivers/net/ethernet/airoha/airoha_regs.h | 29 +++++++++
- 4 files changed, 111 insertions(+), 8 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1589,14 +1589,81 @@ static int airoha_dev_set_macaddr(struct
-       return 0;
- }
-+static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
-+{
-+      u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
-+      struct airoha_eth *eth = port->qdma->eth;
-+      u32 chan = port->id == 3 ? 4 : 0;
-+
-+      /* Forward the traffic to the proper GDM port */
-+      airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
-+      airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
-+
-+      /* Enable GDM2 loopback */
-+      airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
-+      airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
-+      airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
-+                    LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
-+                    FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
-+      airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
-+                    GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-+                    FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-+                    FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
-+
-+      /* Disable VIP and IFC for GDM2 */
-+      airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
-+      airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
-+
-+      if (port->id == 3) {
-+              /* FIXME: handle XSI_PCE1_PORT */
-+              airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),  0x5500);
-+              airoha_fe_rmw(eth, REG_FE_WAN_PORT,
-+                            WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
-+                            FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
-+              airoha_fe_rmw(eth,
-+                            REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
-+                            SP_CPORT_PCIE0_MASK,
-+                            FIELD_PREP(SP_CPORT_PCIE0_MASK,
-+                                       FE_PSE_PORT_CDM2));
-+      } else {
-+              /* FIXME: handle XSI_USB_PORT */
-+              airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
-+                            FC_ID_OF_SRC_PORT24_MASK,
-+                            FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
-+              airoha_fe_rmw(eth, REG_FE_WAN_PORT,
-+                            WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
-+                            FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
-+              airoha_fe_rmw(eth,
-+                            REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
-+                            SP_CPORT_ETH_MASK,
-+                            FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
-+      }
-+}
-+
- static int airoha_dev_init(struct net_device *dev)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
-       struct airoha_eth *eth = port->qdma->eth;
-+      u32 pse_port;
-       airoha_set_macaddr(port, dev->dev_addr);
--      airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id),
--                                  FE_PSE_PORT_PPE1);
-+
-+      switch (port->id) {
-+      case 3:
-+      case 4:
-+              /* If GDM2 is active we can't enable loopback */
-+              if (!eth->ports[1])
-+                      airhoha_set_gdm2_loopback(port);
-+              fallthrough;
-+      case 2:
-+              pse_port = FE_PSE_PORT_PPE2;
-+              break;
-+      default:
-+              pse_port = FE_PSE_PORT_PPE1;
-+              break;
-+      }
-+
-+      airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
-       return 0;
- }
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -68,6 +68,13 @@ enum {
- };
- enum {
-+      HSGMII_LAN_PCIE0_SRCPORT = 0x16,
-+      HSGMII_LAN_PCIE1_SRCPORT,
-+      HSGMII_LAN_ETH_SRCPORT,
-+      HSGMII_LAN_USB_SRCPORT,
-+};
-+
-+enum {
-       XSI_PCIE0_VIP_PORT_MASK = BIT(22),
-       XSI_PCIE1_VIP_PORT_MASK = BIT(23),
-       XSI_USB_VIP_PORT_MASK   = BIT(25),
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -216,7 +216,8 @@ static int airoha_ppe_foe_entry_prepare(
-             AIROHA_FOE_IB1_BIND_TTL;
-       hwe->ib1 = val;
--      val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
-+      val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
-+            AIROHA_FOE_IB2_PSE_QOS;
-       if (dsa_port >= 0)
-               val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
-@@ -224,14 +225,13 @@ static int airoha_ppe_foe_entry_prepare(
-               struct airoha_gdm_port *port = netdev_priv(dev);
-               u8 pse_port;
--              pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-+              if (dsa_port >= 0)
-+                      pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-+              else
-+                      pse_port = 2; /* uplink relies on GDM2 loopback */
-               val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
-       }
--      /* FIXME: implement QoS support setting pse_port to 2 (loopback)
--       * for uplink and setting qos bit in ib2
--       */
--
-       if (is_multicast_ether_addr(data->eth.h_dest))
-               val |= AIROHA_FOE_IB2_MULTICAST;
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -38,6 +38,12 @@
- #define FE_RST_CORE_MASK              BIT(0)
- #define REG_FE_FOE_TS                 0x0010
-+
-+#define REG_FE_WAN_PORT                       0x0024
-+#define WAN1_EN_MASK                  BIT(16)
-+#define WAN1_MASK                     GENMASK(12, 8)
-+#define WAN0_MASK                     GENMASK(4, 0)
-+
- #define REG_FE_WAN_MAC_H              0x0030
- #define REG_FE_LAN_MAC_H              0x0040
-@@ -126,6 +132,7 @@
- #define GDM_IP4_CKSUM                 BIT(22)
- #define GDM_TCP_CKSUM                 BIT(21)
- #define GDM_UDP_CKSUM                 BIT(20)
-+#define GDM_STRIP_CRC                 BIT(16)
- #define GDM_UCFQ_MASK                 GENMASK(15, 12)
- #define GDM_BCFQ_MASK                 GENMASK(11, 8)
- #define GDM_MCFQ_MASK                 GENMASK(7, 4)
-@@ -139,6 +146,16 @@
- #define GDM_SHORT_LEN_MASK            GENMASK(13, 0)
- #define GDM_LONG_LEN_MASK             GENMASK(29, 16)
-+#define REG_GDM_LPBK_CFG(_n)          (GDM_BASE(_n) + 0x1c)
-+#define LPBK_GAP_MASK                 GENMASK(31, 24)
-+#define LPBK_LEN_MASK                 GENMASK(23, 10)
-+#define LPBK_CHAN_MASK                        GENMASK(8, 4)
-+#define LPBK_MODE_MASK                        GENMASK(3, 1)
-+#define LPBK_EN_MASK                  BIT(0)
-+
-+#define REG_GDM_TXCHN_EN(_n)          (GDM_BASE(_n) + 0x24)
-+#define REG_GDM_RXCHN_EN(_n)          (GDM_BASE(_n) + 0x28)
-+
- #define REG_FE_CPORT_CFG              (GDM1_BASE + 0x40)
- #define FE_CPORT_PAD                  BIT(26)
- #define FE_CPORT_PORT_XFC_MASK                BIT(25)
-@@ -351,6 +368,18 @@
- #define REG_MC_VLAN_DATA              0x2108
-+#define REG_SP_DFT_CPORT(_n)          (0x20e0 + ((_n) << 2))
-+#define SP_CPORT_PCIE1_MASK           GENMASK(31, 28)
-+#define SP_CPORT_PCIE0_MASK           GENMASK(27, 24)
-+#define SP_CPORT_USB_MASK             GENMASK(7, 4)
-+#define SP_CPORT_ETH_MASK             GENMASK(7, 4)
-+
-+#define REG_SRC_PORT_FC_MAP6          0x2298
-+#define FC_ID_OF_SRC_PORT27_MASK      GENMASK(28, 24)
-+#define FC_ID_OF_SRC_PORT26_MASK      GENMASK(20, 16)
-+#define FC_ID_OF_SRC_PORT25_MASK      GENMASK(12, 8)
-+#define FC_ID_OF_SRC_PORT24_MASK      GENMASK(4, 0)
-+
- #define REG_CDM5_RX_OQ1_DROP_CNT      0x29d4
- /* QDMA */
diff --git a/target/linux/airoha/patches-6.6/048-15-v6.15-net-airoha-Introduce-PPE-debugfs-support.patch b/target/linux/airoha/patches-6.6/048-15-v6.15-net-airoha-Introduce-PPE-debugfs-support.patch
deleted file mode 100644 (file)
index 50d7fa1..0000000
+++ /dev/null
@@ -1,291 +0,0 @@
-From 3fe15c640f3808c3faf235553c67c867d1389e5c Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 28 Feb 2025 11:54:23 +0100
-Subject: [PATCH 15/15] net: airoha: Introduce PPE debugfs support
-
-Similar to the PPE support for Mediatek devices, introduce PPE debugfs
-support in order to dump bound and unbound flows.
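The two files end up under /sys/kernel/debug/ppe/ ("entries" lists every
active FOE entry, "bind" only the bound ones). Based on the seq_printf()
format strings in the new file, a dumped line looks roughly like the
following (addresses and values are hypothetical, shown only to illustrate
the layout):

	00123 BND IPv4 5T orig=192.168.1.10:40000->8.8.8.8:53 new=10.0.0.2:40000->8.8.8.8:53 eth=00:11:22:33:44:55->66:77:88:99:aa:bb etype=0800 data=00000000 vlan=0,0 ib1=8e000000 ib2=00200000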
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/Makefile          |   1 +
- drivers/net/ethernet/airoha/airoha_eth.h      |  14 ++
- drivers/net/ethernet/airoha/airoha_ppe.c      |  17 +-
- .../net/ethernet/airoha/airoha_ppe_debugfs.c  | 181 ++++++++++++++++++
- 4 files changed, 209 insertions(+), 4 deletions(-)
- create mode 100644 drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
-
---- a/drivers/net/ethernet/airoha/Makefile
-+++ b/drivers/net/ethernet/airoha/Makefile
-@@ -5,4 +5,5 @@
- obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
- airoha-eth-y := airoha_eth.o airoha_ppe.o
-+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
- obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -7,6 +7,7 @@
- #ifndef AIROHA_ETH_H
- #define AIROHA_ETH_H
-+#include <linux/debugfs.h>
- #include <linux/etherdevice.h>
- #include <linux/iopoll.h>
- #include <linux/kernel.h>
-@@ -480,6 +481,8 @@ struct airoha_ppe {
-       struct hlist_head *foe_flow;
-       u16 foe_check_time[PPE_NUM_ENTRIES];
-+
-+      struct dentry *debugfs_dir;
- };
- struct airoha_eth {
-@@ -533,5 +536,16 @@ int airoha_ppe_setup_tc_block_cb(enum tc
-                                void *cb_priv);
- int airoha_ppe_init(struct airoha_eth *eth);
- void airoha_ppe_deinit(struct airoha_eth *eth);
-+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-+                                                u32 hash);
-+
-+#if CONFIG_DEBUG_FS
-+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
-+#else
-+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
-+{
-+      return 0;
-+}
-+#endif
- #endif /* AIROHA_ETH_H */
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -390,8 +390,8 @@ static u32 airoha_ppe_foe_get_entry_hash
-       return hash;
- }
--static struct airoha_foe_entry *
--airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash)
-+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-+                                                u32 hash)
- {
-       if (hash < PPE_SRAM_NUM_ENTRIES) {
-               u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
-@@ -861,7 +861,7 @@ void airoha_ppe_check_skb(struct airoha_
- int airoha_ppe_init(struct airoha_eth *eth)
- {
-       struct airoha_ppe *ppe;
--      int foe_size;
-+      int foe_size, err;
-       ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
-       if (!ppe)
-@@ -882,7 +882,15 @@ int airoha_ppe_init(struct airoha_eth *e
-       if (!ppe->foe_flow)
-               return -ENOMEM;
--      return rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
-+      err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
-+      if (err)
-+              return err;
-+
-+      err = airoha_ppe_debugfs_init(ppe);
-+      if (err)
-+              rhashtable_destroy(&eth->flow_table);
-+
-+      return err;
- }
- void airoha_ppe_deinit(struct airoha_eth *eth)
-@@ -898,4 +906,5 @@ void airoha_ppe_deinit(struct airoha_eth
-       rcu_read_unlock();
-       rhashtable_destroy(&eth->flow_table);
-+      debugfs_remove(eth->ppe->debugfs_dir);
- }
---- /dev/null
-+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
-@@ -0,0 +1,181 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2025 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+
-+#include "airoha_eth.h"
-+
-+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
-+                                         void *src_addr, void *dest_addr,
-+                                         u16 *src_port, u16 *dest_port,
-+                                         bool ipv6)
-+{
-+      __be32 n_addr[IPV6_ADDR_WORDS];
-+
-+      if (ipv6) {
-+              ipv6_addr_cpu_to_be32(n_addr, src_addr);
-+              seq_printf(m, "%pI6", n_addr);
-+      } else {
-+              seq_printf(m, "%pI4h", src_addr);
-+      }
-+      if (src_port)
-+              seq_printf(m, ":%d", *src_port);
-+
-+      seq_puts(m, "->");
-+
-+      if (ipv6) {
-+              ipv6_addr_cpu_to_be32(n_addr, dest_addr);
-+              seq_printf(m, "%pI6", n_addr);
-+      } else {
-+              seq_printf(m, "%pI4h", dest_addr);
-+      }
-+      if (dest_port)
-+              seq_printf(m, ":%d", *dest_port);
-+}
-+
-+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
-+                                     bool bind)
-+{
-+      static const char *const ppe_type_str[] = {
-+              [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
-+              [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
-+              [PPE_PKT_TYPE_BRIDGE] = "L2B",
-+              [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
-+              [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
-+              [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
-+              [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
-+      };
-+      static const char *const ppe_state_str[] = {
-+              [AIROHA_FOE_STATE_INVALID] = "INV",
-+              [AIROHA_FOE_STATE_UNBIND] = "UNB",
-+              [AIROHA_FOE_STATE_BIND] = "BND",
-+              [AIROHA_FOE_STATE_FIN] = "FIN",
-+      };
-+      struct airoha_ppe *ppe = m->private;
-+      int i;
-+
-+      for (i = 0; i < PPE_NUM_ENTRIES; i++) {
-+              const char *state_str, *type_str = "UNKNOWN";
-+              void *src_addr = NULL, *dest_addr = NULL;
-+              u16 *src_port = NULL, *dest_port = NULL;
-+              struct airoha_foe_mac_info_common *l2;
-+              unsigned char h_source[ETH_ALEN] = {};
-+              unsigned char h_dest[ETH_ALEN];
-+              struct airoha_foe_entry *hwe;
-+              u32 type, state, ib2, data;
-+              bool ipv6 = false;
-+
-+              hwe = airoha_ppe_foe_get_entry(ppe, i);
-+              if (!hwe)
-+                      continue;
-+
-+              state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
-+              if (!state)
-+                      continue;
-+
-+              if (bind && state != AIROHA_FOE_STATE_BIND)
-+                      continue;
-+
-+              state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
-+              type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
-+              if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
-+                      type_str = ppe_type_str[type];
-+
-+              seq_printf(m, "%05x %s %7s", i, state_str, type_str);
-+
-+              switch (type) {
-+              case PPE_PKT_TYPE_IPV4_HNAPT:
-+              case PPE_PKT_TYPE_IPV4_DSLITE:
-+                      src_port = &hwe->ipv4.orig_tuple.src_port;
-+                      dest_port = &hwe->ipv4.orig_tuple.dest_port;
-+                      fallthrough;
-+              case PPE_PKT_TYPE_IPV4_ROUTE:
-+                      src_addr = &hwe->ipv4.orig_tuple.src_ip;
-+                      dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
-+                      break;
-+              case PPE_PKT_TYPE_IPV6_ROUTE_5T:
-+                      src_port = &hwe->ipv6.src_port;
-+                      dest_port = &hwe->ipv6.dest_port;
-+                      fallthrough;
-+              case PPE_PKT_TYPE_IPV6_ROUTE_3T:
-+              case PPE_PKT_TYPE_IPV6_6RD:
-+                      src_addr = &hwe->ipv6.src_ip;
-+                      dest_addr = &hwe->ipv6.dest_ip;
-+                      ipv6 = true;
-+                      break;
-+              default:
-+                      break;
-+              }
-+
-+              if (src_addr && dest_addr) {
-+                      seq_puts(m, " orig=");
-+                      airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
-+                                                     src_port, dest_port, ipv6);
-+              }
-+
-+              switch (type) {
-+              case PPE_PKT_TYPE_IPV4_HNAPT:
-+              case PPE_PKT_TYPE_IPV4_DSLITE:
-+                      src_port = &hwe->ipv4.new_tuple.src_port;
-+                      dest_port = &hwe->ipv4.new_tuple.dest_port;
-+                      fallthrough;
-+              case PPE_PKT_TYPE_IPV4_ROUTE:
-+                      src_addr = &hwe->ipv4.new_tuple.src_ip;
-+                      dest_addr = &hwe->ipv4.new_tuple.dest_ip;
-+                      seq_puts(m, " new=");
-+                      airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
-+                                                     src_port, dest_port,
-+                                                     ipv6);
-+                      break;
-+              default:
-+                      break;
-+              }
-+
-+              if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-+                      data = hwe->ipv6.data;
-+                      ib2 = hwe->ipv6.ib2;
-+                      l2 = &hwe->ipv6.l2;
-+              } else {
-+                      data = hwe->ipv4.data;
-+                      ib2 = hwe->ipv4.ib2;
-+                      l2 = &hwe->ipv4.l2.common;
-+                      *((__be16 *)&h_source[4]) =
-+                              cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
-+              }
-+
-+              *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
-+              *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
-+              *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
-+
-+              seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
-+                            " vlan=%d,%d ib1=%08x ib2=%08x\n",
-+                         h_source, h_dest, l2->etype, data,
-+                         l2->vlan1, l2->vlan2, hwe->ib1, ib2);
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
-+{
-+      return airoha_ppe_debugfs_foe_show(m, private, false);
-+}
-+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
-+
-+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
-+{
-+      return airoha_ppe_debugfs_foe_show(m, private, true);
-+}
-+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
-+
-+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
-+{
-+      ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
-+      debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
-+                          &airoha_ppe_debugfs_foe_all_fops);
-+      debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
-+                          &airoha_ppe_debugfs_foe_bind_fops);
-+
-+      return 0;
-+}
diff --git a/target/linux/airoha/patches-6.6/049-01-v6.16-thermal-drivers-Add-support-for-Airoha-EN7581-therma.patch b/target/linux/airoha/patches-6.6/049-01-v6.16-thermal-drivers-Add-support-for-Airoha-EN7581-therma.patch
deleted file mode 100644 (file)
index e168cda..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-From 42de37f40e1bc818df216dfa0918c114cfb5941d Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Sun, 11 May 2025 20:49:55 +0200
-Subject: [PATCH] thermal/drivers: Add support for Airoha EN7581 thermal sensor
-
-Add support for the Airoha EN7581 thermal sensor. This provides support
-for reading the CPU or SoC package sensor and for setting up trip points
-for hot and critical conditions. An interrupt is fired to react to these
-conditions, so passive polling is not required to read the temperature.
-
-The thermal regs provide a way to read the ADC value from an external
-register placed in the Chip SCU regs. The monitor reads this value and
-fires an interrupt if the configured trip condition is reached.
-
-The thermal trip and interrupt logic is conceptually similar to Mediatek
-LVTS Thermal but differs in register mapping and in the actual
-function/bug workarounds. The implementation only shares some register
-names; judging from observed behaviour it is very different and is used
-only for the basic task of periodically polling the temperature and
-tripping the interrupt.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20250511185003.3754495-2-ansuelsmth@gmail.com
-Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
----
- drivers/thermal/Kconfig          |   9 +
- drivers/thermal/Makefile         |   1 +
- drivers/thermal/airoha_thermal.c | 489 +++++++++++++++++++++++++++++++
- 3 files changed, 499 insertions(+)
- create mode 100644 drivers/thermal/airoha_thermal.c
-
---- a/drivers/thermal/Kconfig
-+++ b/drivers/thermal/Kconfig
-@@ -317,6 +317,15 @@ config QORIQ_THERMAL
-         cpufreq is used as the cooling device to throttle CPUs when the
-         passive trip is crossed.
-+config AIROHA_THERMAL
-+      tristate "Airoha thermal sensor driver"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      depends on MFD_SYSCON
-+      depends on OF
-+      help
-+        Enable this to plug the Airoha thermal sensor driver into the Linux
-+        thermal framework.
-+
- config SPEAR_THERMAL
-       tristate "SPEAr thermal sensor driver"
-       depends on PLAT_SPEAR || COMPILE_TEST
---- a/drivers/thermal/Makefile
-+++ b/drivers/thermal/Makefile
-@@ -34,6 +34,7 @@ obj-$(CONFIG_K3_THERMAL)     += k3_bandgap.o
- # platform thermal drivers
- obj-y                         += broadcom/
- obj-$(CONFIG_THERMAL_MMIO)            += thermal_mmio.o
-+obj-$(CONFIG_AIROHA_THERMAL)  += airoha_thermal.o
- obj-$(CONFIG_SPEAR_THERMAL)   += spear_thermal.o
- obj-$(CONFIG_SUN8I_THERMAL)     += sun8i_thermal.o
- obj-$(CONFIG_ROCKCHIP_THERMAL)        += rockchip_thermal.o
---- /dev/null
-+++ b/drivers/thermal/airoha_thermal.c
-@@ -0,0 +1,489 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+
-+#include <linux/module.h>
-+#include <linux/bitfield.h>
-+#include <linux/delay.h>
-+#include <linux/interrupt.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/of.h>
-+#include <linux/of_address.h>
-+#include <linux/platform_device.h>
-+#include <linux/regmap.h>
-+#include <linux/thermal.h>
-+
-+/* SCU regs */
-+#define EN7581_PLLRG_PROTECT                  0x268
-+#define EN7581_PWD_TADC                               0x2ec
-+#define   EN7581_MUX_TADC                     GENMASK(3, 1)
-+#define EN7581_DOUT_TADC                      0x2f8
-+#define   EN7581_DOUT_TADC_MASK                       GENMASK(15, 0)
-+
-+/* PTP_THERMAL regs */
-+#define EN7581_TEMPMONCTL0                    0x800
-+#define   EN7581_SENSE3_EN                    BIT(3)
-+#define   EN7581_SENSE2_EN                    BIT(2)
-+#define   EN7581_SENSE1_EN                    BIT(1)
-+#define   EN7581_SENSE0_EN                    BIT(0)
-+#define EN7581_TEMPMONCTL1                    0x804
-+/* period unit calculated in BUS clock * 256 scaling-up */
-+#define   EN7581_PERIOD_UNIT                  GENMASK(9, 0)
-+#define EN7581_TEMPMONCTL2                    0x808
-+#define   EN7581_FILT_INTERVAL                        GENMASK(25, 16)
-+#define   EN7581_SEN_INTERVAL                 GENMASK(9, 0)
-+#define EN7581_TEMPMONINT                     0x80C
-+#define   EN7581_STAGE3_INT_EN                        BIT(31)
-+#define   EN7581_STAGE2_INT_EN                        BIT(30)
-+#define   EN7581_STAGE1_INT_EN                        BIT(29)
-+#define   EN7581_FILTER_INT_EN_3              BIT(28)
-+#define   EN7581_IMMD_INT_EN3                 BIT(27)
-+#define   EN7581_NOHOTINTEN3                  BIT(26)
-+#define   EN7581_HOFSINTEN3                   BIT(25)
-+#define   EN7581_LOFSINTEN3                   BIT(24)
-+#define   EN7581_HINTEN3                      BIT(23)
-+#define   EN7581_CINTEN3                      BIT(22)
-+#define   EN7581_FILTER_INT_EN_2              BIT(21)
-+#define   EN7581_FILTER_INT_EN_1              BIT(20)
-+#define   EN7581_FILTER_INT_EN_0              BIT(19)
-+#define   EN7581_IMMD_INT_EN2                 BIT(18)
-+#define   EN7581_IMMD_INT_EN1                 BIT(17)
-+#define   EN7581_IMMD_INT_EN0                 BIT(16)
-+#define   EN7581_TIME_OUT_INT_EN              BIT(15)
-+#define   EN7581_NOHOTINTEN2                  BIT(14)
-+#define   EN7581_HOFSINTEN2                   BIT(13)
-+#define   EN7581_LOFSINTEN2                   BIT(12)
-+#define   EN7581_HINTEN2                      BIT(11)
-+#define   EN7581_CINTEN2                      BIT(10)
-+#define   EN7581_NOHOTINTEN1                  BIT(9)
-+#define   EN7581_HOFSINTEN1                   BIT(8)
-+#define   EN7581_LOFSINTEN1                   BIT(7)
-+#define   EN7581_HINTEN1                      BIT(6)
-+#define   EN7581_CINTEN1                      BIT(5)
-+#define   EN7581_NOHOTINTEN0                  BIT(4)
-+/* Similar to COLD and HOT also these seems to be swapped in documentation */
-+#define   EN7581_LOFSINTEN0                   BIT(3) /* In documentation: BIT(2) */
-+#define   EN7581_HOFSINTEN0                   BIT(2) /* In documentation: BIT(3) */
-+/* It seems documentation have these swapped as the HW
-+ * - Fire BIT(1) when lower than EN7581_COLD_THRE
-+ * - Fire BIT(0) and BIT(5) when higher than EN7581_HOT2NORMAL_THRE or
-+ *     EN7581_HOT_THRE
-+ */
-+#define   EN7581_CINTEN0                      BIT(1) /* In documentation: BIT(0) */
-+#define   EN7581_HINTEN0                      BIT(0) /* In documentation: BIT(1) */
-+#define EN7581_TEMPMONINTSTS                  0x810
-+#define   EN7581_STAGE3_INT_STAT              BIT(31)
-+#define   EN7581_STAGE2_INT_STAT              BIT(30)
-+#define   EN7581_STAGE1_INT_STAT              BIT(29)
-+#define   EN7581_FILTER_INT_STAT_3            BIT(28)
-+#define   EN7581_IMMD_INT_STS3                        BIT(27)
-+#define   EN7581_NOHOTINTSTS3                 BIT(26)
-+#define   EN7581_HOFSINTSTS3                  BIT(25)
-+#define   EN7581_LOFSINTSTS3                  BIT(24)
-+#define   EN7581_HINTSTS3                     BIT(23)
-+#define   EN7581_CINTSTS3                     BIT(22)
-+#define   EN7581_FILTER_INT_STAT_2            BIT(21)
-+#define   EN7581_FILTER_INT_STAT_1            BIT(20)
-+#define   EN7581_FILTER_INT_STAT_0            BIT(19)
-+#define   EN7581_IMMD_INT_STS2                        BIT(18)
-+#define   EN7581_IMMD_INT_STS1                        BIT(17)
-+#define   EN7581_IMMD_INT_STS0                        BIT(16)
-+#define   EN7581_TIME_OUT_INT_STAT            BIT(15)
-+#define   EN7581_NOHOTINTSTS2                 BIT(14)
-+#define   EN7581_HOFSINTSTS2                  BIT(13)
-+#define   EN7581_LOFSINTSTS2                  BIT(12)
-+#define   EN7581_HINTSTS2                     BIT(11)
-+#define   EN7581_CINTSTS2                     BIT(10)
-+#define   EN7581_NOHOTINTSTS1                 BIT(9)
-+#define   EN7581_HOFSINTSTS1                  BIT(8)
-+#define   EN7581_LOFSINTSTS1                  BIT(7)
-+#define   EN7581_HINTSTS1                     BIT(6)
-+#define   EN7581_CINTSTS1                     BIT(5)
-+#define   EN7581_NOHOTINTSTS0                 BIT(4)
-+/* Similar to COLD and HOT also these seems to be swapped in documentation */
-+#define   EN7581_LOFSINTSTS0                  BIT(3) /* In documentation: BIT(2) */
-+#define   EN7581_HOFSINTSTS0                  BIT(2) /* In documentation: BIT(3) */
-+/* It seems documentation have these swapped as the HW
-+ * - Fire BIT(1) when lower than EN7581_COLD_THRE
-+ * - Fire BIT(0) and BIT(5) when higher than EN7581_HOT2NORMAL_THRE or
-+ *     EN7581_HOT_THRE
-+ *
-+ * To clear things, we swap the define but we keep them documented here.
-+ */
-+#define   EN7581_CINTSTS0                     BIT(1) /* In documentation: BIT(0) */
-+#define   EN7581_HINTSTS0                     BIT(0) /* In documentation: BIT(1)*/
-+/* Monitor will take the bigger threshold between HOT2NORMAL and HOT
-+ * and will fire both HOT2NORMAL and HOT interrupt when higher than the 2
-+ *
-+ * It has also been observed that not setting HOT2NORMAL makes the monitor
-+ * treat COLD threshold as HOT2NORMAL.
-+ */
-+#define EN7581_TEMPH2NTHRE                    0x824
-+/* It seems HOT2NORMAL is actually NORMAL2HOT */
-+#define   EN7581_HOT2NORMAL_THRE              GENMASK(11, 0)
-+#define EN7581_TEMPHTHRE                      0x828
-+#define   EN7581_HOT_THRE                     GENMASK(11, 0)
-+/* Monitor will use this as HOT2NORMAL (fire interrupt when lower than...)*/
-+#define EN7581_TEMPCTHRE                      0x82c
-+#define   EN7581_COLD_THRE                    GENMASK(11, 0)
-+/* Also LOW and HIGH offset register are swapped */
-+#define EN7581_TEMPOFFSETL                    0x830 /* In documentation: 0x834 */
-+#define   EN7581_LOW_OFFSET                   GENMASK(11, 0)
-+#define EN7581_TEMPOFFSETH                    0x834 /* In documentation: 0x830 */
-+#define   EN7581_HIGH_OFFSET                  GENMASK(11, 0)
-+#define EN7581_TEMPMSRCTL0                    0x838
-+#define   EN7581_MSRCTL3                      GENMASK(11, 9)
-+#define   EN7581_MSRCTL2                      GENMASK(8, 6)
-+#define   EN7581_MSRCTL1                      GENMASK(5, 3)
-+#define   EN7581_MSRCTL0                      GENMASK(2, 0)
-+#define EN7581_TEMPADCVALIDADDR                       0x878
-+#define   EN7581_ADC_VALID_ADDR                       GENMASK(31, 0)
-+#define EN7581_TEMPADCVOLTADDR                        0x87c
-+#define   EN7581_ADC_VOLT_ADDR                        GENMASK(31, 0)
-+#define EN7581_TEMPRDCTRL                     0x880
-+/*
-+ * NOTICE: AHB have this set to 0 by default. Means that
-+ * the same addr is used for ADC volt and valid reading.
-+ * In such case, VALID ADDR is used and volt addr is ignored.
-+ */
-+#define   EN7581_RD_CTRL_DIFF                 BIT(0)
-+#define EN7581_TEMPADCVALIDMASK                       0x884
-+#define   EN7581_ADV_RD_VALID_POLARITY                BIT(5)
-+#define   EN7581_ADV_RD_VALID_POS             GENMASK(4, 0)
-+#define EN7581_TEMPADCVOLTAGESHIFT            0x888
-+#define   EN7581_ADC_VOLTAGE_SHIFT            GENMASK(4, 0)
-+/*
-+ * Same values for each CTL.
-+ * Can operate in:
-+ * - 1 sample
-+ * - 2 sample and make average of them
-+ * - 4,6,10,16 sample, drop max and min and make avgerage of them
-+ */
-+#define   EN7581_MSRCTL_1SAMPLE                       0x0
-+#define   EN7581_MSRCTL_AVG2SAMPLE            0x1
-+#define   EN7581_MSRCTL_4SAMPLE_MAX_MIX_AVG2  0x2
-+#define   EN7581_MSRCTL_6SAMPLE_MAX_MIX_AVG4  0x3
-+#define   EN7581_MSRCTL_10SAMPLE_MAX_MIX_AVG8 0x4
-+#define   EN7581_MSRCTL_18SAMPLE_MAX_MIX_AVG16        0x5
-+#define EN7581_TEMPAHBPOLL                    0x840
-+#define   EN7581_ADC_POLL_INTVL                       GENMASK(31, 0)
-+/* PTPSPARE0,2 reg are used to store efuse info for calibrated temp offset */
-+#define EN7581_EFUSE_TEMP_OFFSET_REG          0xf20 /* PTPSPARE0 */
-+#define   EN7581_EFUSE_TEMP_OFFSET            GENMASK(31, 16)
-+#define EN7581_PTPSPARE1                      0xf24 /* PTPSPARE1 */
-+#define EN7581_EFUSE_TEMP_CPU_SENSOR_REG      0xf28 /* PTPSPARE2 */
-+
-+#define EN7581_SLOPE_X100_DIO_DEFAULT         5645
-+#define EN7581_SLOPE_X100_DIO_AVS             5645
-+
-+#define EN7581_INIT_TEMP_CPK_X10              300
-+#define EN7581_INIT_TEMP_FTK_X10              620
-+#define EN7581_INIT_TEMP_NONK_X10             550
-+
-+#define EN7581_SCU_THERMAL_PROTECT_KEY                0x12
-+#define EN7581_SCU_THERMAL_MUX_DIODE1         0x7
-+
-+/* Convert temp to raw value as read from ADC ((((temp / 100) - init) * slope) / 1000) + offset */
-+#define TEMP_TO_RAW(priv, temp)                       ((((((temp) / 100) - (priv)->init_temp) * \
-+                                                (priv)->default_slope) / 1000) + \
-+                                               (priv)->default_offset)
-+
-+/* Convert raw to temp                                ((((temp - offset) * 1000) / slope + init) * 100) */
-+#define RAW_TO_TEMP(priv, raw)                        (((((raw) - (priv)->default_offset) * 1000) / \
-+                                                (priv)->default_slope + \
-+                                                (priv)->init_temp) * 100)
-+
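As a rough worked example of RAW_TO_TEMP() (using the non-calibrated
defaults defined above, init_temp = EN7581_INIT_TEMP_NONK_X10 = 550 and
slope = EN7581_SLOPE_X100_DIO_DEFAULT = 5645, and assuming a hypothetical
boot-time ADC reading of 2500 stored in default_offset; the non-calibrated
path samples the ADC once at probe time):

	raw  = 2800
	temp = (((2800 - 2500) * 1000) / 5645 + 550) * 100
	     = (53 + 550) * 100
	     = 60300	/* millidegrees Celsius, i.e. ~60.3 C, as expected
			 * by the thermal framework */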
-+#define AIROHA_MAX_SAMPLES                    6
-+
-+struct airoha_thermal_priv {
-+      void __iomem *base;
-+      struct regmap *chip_scu;
-+      struct resource scu_adc_res;
-+
-+      struct thermal_zone_device *tz;
-+      int init_temp;
-+      int default_slope;
-+      int default_offset;
-+};
-+
-+static int airoha_get_thermal_ADC(struct airoha_thermal_priv *priv)
-+{
-+      u32 val;
-+
-+      regmap_read(priv->chip_scu, EN7581_DOUT_TADC, &val);
-+      return FIELD_GET(EN7581_DOUT_TADC_MASK, val);
-+}
-+
-+static void airoha_init_thermal_ADC_mode(struct airoha_thermal_priv *priv)
-+{
-+      u32 adc_mux, pllrg;
-+
-+      /* Save PLLRG current value */
-+      regmap_read(priv->chip_scu, EN7581_PLLRG_PROTECT, &pllrg);
-+
-+      /* Give access to thermal regs */
-+      regmap_write(priv->chip_scu, EN7581_PLLRG_PROTECT, EN7581_SCU_THERMAL_PROTECT_KEY);
-+      adc_mux = FIELD_PREP(EN7581_MUX_TADC, EN7581_SCU_THERMAL_MUX_DIODE1);
-+      regmap_write(priv->chip_scu, EN7581_PWD_TADC, adc_mux);
-+
-+      /* Restore PLLRG value on exit */
-+      regmap_write(priv->chip_scu, EN7581_PLLRG_PROTECT, pllrg);
-+}
-+
-+static int airoha_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
-+{
-+      struct airoha_thermal_priv *priv = thermal_zone_device_priv(tz);
-+      int min_value, max_value, avg_value, value;
-+      int i;
-+
-+      avg_value = 0;
-+      min_value = INT_MAX;
-+      max_value = INT_MIN;
-+
-+      for (i = 0; i < AIROHA_MAX_SAMPLES; i++) {
-+              value = airoha_get_thermal_ADC(priv);
-+              min_value = min(value, min_value);
-+              max_value = max(value, max_value);
-+              avg_value += value;
-+      }
-+
-+      /* Drop min and max and average for the remaining sample */
-+      avg_value -= (min_value + max_value);
-+      avg_value /= AIROHA_MAX_SAMPLES - 2;
-+
-+      *temp = RAW_TO_TEMP(priv, avg_value);
-+      return 0;
-+}
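For instance (illustrative numbers only): with the six ADC samples 2790,
2802, 2798, 2805, 2793 and 2800, the sum is 16788; dropping the minimum
(2790) and the maximum (2805) leaves 11193, and dividing by
AIROHA_MAX_SAMPLES - 2 = 4 gives an averaged raw value of 2798.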
-+
-+static int airoha_thermal_set_trips(struct thermal_zone_device *tz, int low,
-+                                  int high)
-+{
-+      struct airoha_thermal_priv *priv = thermal_zone_device_priv(tz);
-+      bool enable_monitor = false;
-+
-+      if (high != INT_MAX) {
-+              /* Validate high and clamp it to a supported value */
-+              high = clamp_t(int, high, RAW_TO_TEMP(priv, 0),
-+                             RAW_TO_TEMP(priv, FIELD_MAX(EN7581_DOUT_TADC_MASK)));
-+
-+              /* We offset the high temp of 1°C to trigger correct event */
-+              writel(TEMP_TO_RAW(priv, high) >> 4,
-+                     priv->base + EN7581_TEMPOFFSETH);
-+
-+              enable_monitor = true;
-+      }
-+
-+      if (low != -INT_MAX) {
-+              /* Validate low and clamp it to a supported value */
-+              low = clamp_t(int, low, RAW_TO_TEMP(priv, 0),
-+                            RAW_TO_TEMP(priv, FIELD_MAX(EN7581_DOUT_TADC_MASK)));
-+
-+              /* We offset the low temp of 1°C to trigger correct event */
-+              writel(TEMP_TO_RAW(priv, low) >> 4,
-+                     priv->base + EN7581_TEMPOFFSETL);
-+
-+              enable_monitor = true;
-+      }
-+
-+      /* Enable sensor 0 monitor after trip are set */
-+      if (enable_monitor)
-+              writel(EN7581_SENSE0_EN, priv->base + EN7581_TEMPMONCTL0);
-+
-+      return 0;
-+}
-+
-+static const struct thermal_zone_device_ops thdev_ops = {
-+      .get_temp = airoha_thermal_get_temp,
-+      .set_trips = airoha_thermal_set_trips,
-+};
-+
-+static irqreturn_t airoha_thermal_irq(int irq, void *data)
-+{
-+      struct airoha_thermal_priv *priv = data;
-+      enum thermal_notify_event event;
-+      bool update = false;
-+      u32 status;
-+
-+      status = readl(priv->base + EN7581_TEMPMONINTSTS);
-+      switch (status & (EN7581_HOFSINTSTS0 | EN7581_LOFSINTSTS0)) {
-+      case EN7581_HOFSINTSTS0:
-+              event = THERMAL_TRIP_VIOLATED;
-+              update = true;
-+              break;
-+      case EN7581_LOFSINTSTS0:
-+              event = THERMAL_EVENT_UNSPECIFIED;
-+              update = true;
-+              break;
-+      default:
-+              /* Should be impossible as we enable only these Interrupt */
-+              break;
-+      }
-+
-+      /* Reset Interrupt */
-+      writel(status, priv->base + EN7581_TEMPMONINTSTS);
-+
-+      if (update)
-+              thermal_zone_device_update(priv->tz, event);
-+
-+      return IRQ_HANDLED;
-+}
-+
-+static void airoha_thermal_setup_adc_val(struct device *dev,
-+                                       struct airoha_thermal_priv *priv)
-+{
-+      u32 efuse_calib_info, cpu_sensor;
-+
-+      /* Setup thermal sensor to ADC mode and setup the mux to DIODE1 */
-+      airoha_init_thermal_ADC_mode(priv);
-+      /* sleep 10 ms for ADC to enable */
-+      usleep_range(10 * USEC_PER_MSEC, 11 * USEC_PER_MSEC);
-+
-+      efuse_calib_info = readl(priv->base + EN7581_EFUSE_TEMP_OFFSET_REG);
-+      if (efuse_calib_info) {
-+              priv->default_offset = FIELD_GET(EN7581_EFUSE_TEMP_OFFSET, efuse_calib_info);
-+              /* Different slope are applied if the sensor is used for CPU or for package */
-+              cpu_sensor = readl(priv->base + EN7581_EFUSE_TEMP_CPU_SENSOR_REG);
-+              if (cpu_sensor) {
-+                      priv->default_slope = EN7581_SLOPE_X100_DIO_DEFAULT;
-+                      priv->init_temp = EN7581_INIT_TEMP_FTK_X10;
-+              } else {
-+                      priv->default_slope = EN7581_SLOPE_X100_DIO_AVS;
-+                      priv->init_temp = EN7581_INIT_TEMP_CPK_X10;
-+              }
-+      } else {
-+              priv->default_offset = airoha_get_thermal_ADC(priv);
-+              priv->default_slope = EN7581_SLOPE_X100_DIO_DEFAULT;
-+              priv->init_temp = EN7581_INIT_TEMP_NONK_X10;
-+              dev_info(dev, "missing thermal calibration EFUSE, using non-calibrated value\n");
-+      }
-+}
-+
-+static void airoha_thermal_setup_monitor(struct airoha_thermal_priv *priv)
-+{
-+      /* Set measure mode */
-+      writel(FIELD_PREP(EN7581_MSRCTL0, EN7581_MSRCTL_6SAMPLE_MAX_MIX_AVG4),
-+             priv->base + EN7581_TEMPMSRCTL0);
-+
-+      /*
-+       * Configure ADC valid reading addr
-+       * The AHB temp monitor system doesn't have direct access to the
-+       * thermal sensor. It does instead work by providing all kind of
-+       * address to configure how to access and setup an ADC for the
-+       * sensor. EN7581 supports only one sensor hence the
-+       * implementation is greatly simplified but the AHB supports
-+       * up to 4 different sensor from the same ADC that can be
-+       * switched by tuning the ADC mux or wiriting address.
-+       *
-+       * We set valid instead of volt as we don't enable valid/volt
-+       * split reading and AHB read valid addr in such case.
-+       */
-+      writel(priv->scu_adc_res.start + EN7581_DOUT_TADC,
-+             priv->base + EN7581_TEMPADCVALIDADDR);
-+
-+      /*
-+       * Configure valid bit on a fake value of bit 16. The ADC outputs
-+       * max of 2 bytes for voltage.
-+       */
-+      writel(FIELD_PREP(EN7581_ADV_RD_VALID_POS, 16),
-+             priv->base + EN7581_TEMPADCVALIDMASK);
-+
-+      /*
-+       * AHB supports max 12 bytes for ADC voltage. Shift the read
-+       * value 4 bit to the right. Precision lost by this is minimal
-+       * in the order of half a °C and is acceptable in the context
-+       * of triggering interrupt in critical condition.
-+       */
-+      writel(FIELD_PREP(EN7581_ADC_VOLTAGE_SHIFT, 4),
-+             priv->base + EN7581_TEMPADCVOLTAGESHIFT);
-+
-+      /* BUS clock is 300MHz counting unit is 3 * 68.64 * 256 = 52.715us */
-+      writel(FIELD_PREP(EN7581_PERIOD_UNIT, 3),
-+             priv->base + EN7581_TEMPMONCTL1);
-+
-+      /*
-+       * filt interval is 1 * 52.715us = 52.715us,
-+       * sen interval is 379 * 52.715us = 19.97ms
-+       */
-+      writel(FIELD_PREP(EN7581_FILT_INTERVAL, 1) |
-+             FIELD_PREP(EN7581_SEN_INTERVAL, 379),
-+             priv->base + EN7581_TEMPMONCTL2);
-+
-+      /* AHB poll is set to 146 * 68.64 = 10.02us */
-+      writel(FIELD_PREP(EN7581_ADC_POLL_INTVL, 146),
-+             priv->base + EN7581_TEMPAHBPOLL);
-+}
-+
-+static int airoha_thermal_probe(struct platform_device *pdev)
-+{
-+      struct airoha_thermal_priv *priv;
-+      struct device_node *chip_scu_np;
-+      struct device *dev = &pdev->dev;
-+      int irq, ret;
-+
-+      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-+      if (!priv)
-+              return -ENOMEM;
-+
-+      priv->base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(priv->base))
-+              return PTR_ERR(priv->base);
-+
-+      chip_scu_np = of_parse_phandle(dev->of_node, "airoha,chip-scu", 0);
-+      if (!chip_scu_np)
-+              return -EINVAL;
-+
-+      priv->chip_scu = syscon_node_to_regmap(chip_scu_np);
-+      if (IS_ERR(priv->chip_scu))
-+              return PTR_ERR(priv->chip_scu);
-+
-+      of_address_to_resource(chip_scu_np, 0, &priv->scu_adc_res);
-+      of_node_put(chip_scu_np);
-+
-+      irq = platform_get_irq(pdev, 0);
-+      if (irq < 0)
-+              return irq;
-+
-+      ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-+                                      airoha_thermal_irq, IRQF_ONESHOT,
-+                                      pdev->name, priv);
-+      if (ret) {
-+              dev_err(dev, "Can't get interrupt working.\n");
-+              return ret;
-+      }
-+
-+      airoha_thermal_setup_monitor(priv);
-+      airoha_thermal_setup_adc_val(dev, priv);
-+
-+      /* register of thermal sensor and get info from DT */
-+      priv->tz = devm_thermal_of_zone_register(dev, 0, priv, &thdev_ops);
-+      if (IS_ERR(priv->tz)) {
-+              dev_err(dev, "register thermal zone sensor failed\n");
-+              return PTR_ERR(priv->tz);
-+      }
-+
-+      platform_set_drvdata(pdev, priv);
-+
-+      /* Enable LOW and HIGH interrupt */
-+      writel(EN7581_HOFSINTEN0 | EN7581_LOFSINTEN0,
-+             priv->base + EN7581_TEMPMONINT);
-+
-+      return 0;
-+}
-+
-+static const struct of_device_id airoha_thermal_match[] = {
-+      { .compatible = "airoha,en7581-thermal" },
-+      {},
-+};
-+MODULE_DEVICE_TABLE(of, airoha_thermal_match);
-+
-+static struct platform_driver airoha_thermal_driver = {
-+      .driver = {
-+              .name = "airoha-thermal",
-+              .of_match_table = airoha_thermal_match,
-+      },
-+      .probe = airoha_thermal_probe,
-+};
-+
-+module_platform_driver(airoha_thermal_driver);
-+
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("Airoha thermal driver");
-+MODULE_LICENSE("GPL");
diff --git a/target/linux/airoha/patches-6.6/049-02-v6.16-thermal-drivers-airoha-Fix-spelling-mistake.patch b/target/linux/airoha/patches-6.6/049-02-v6.16-thermal-drivers-airoha-Fix-spelling-mistake.patch
deleted file mode 100644 (file)
index 7b1b947..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-From e23cba0ab49a9cf95e9bc3a86cfbf336b0e285f6 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 14 May 2025 23:39:12 +0200
-Subject: [PATCH] thermal/drivers/airoha: Fix spelling mistake
-
-Fix various spelling mistakes in airoha_thermal_setup_monitor() and in a
-define.
-
-Reported-by: Alok Tiwari <alok.a.tiwari@oracle.com>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20250514213919.2321490-1-ansuelsmth@gmail.com
-Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
----
- drivers/thermal/airoha_thermal.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/drivers/thermal/airoha_thermal.c
-+++ b/drivers/thermal/airoha_thermal.c
-@@ -155,7 +155,7 @@
-  * Can operate in:
-  * - 1 sample
-  * - 2 sample and make average of them
-- * - 4,6,10,16 sample, drop max and min and make avgerage of them
-+ * - 4,6,10,16 sample, drop max and min and make average of them
-  */
- #define   EN7581_MSRCTL_1SAMPLE                       0x0
- #define   EN7581_MSRCTL_AVG2SAMPLE            0x1
-@@ -365,12 +365,12 @@ static void airoha_thermal_setup_monitor
-       /*
-        * Configure ADC valid reading addr
-        * The AHB temp monitor system doesn't have direct access to the
--       * thermal sensor. It does instead work by providing all kind of
--       * address to configure how to access and setup an ADC for the
-+       * thermal sensor. It does instead work by providing various
-+       * addresses to configure how to access and setup an ADC for the
-        * sensor. EN7581 supports only one sensor hence the
-        * implementation is greatly simplified but the AHB supports
--       * up to 4 different sensor from the same ADC that can be
--       * switched by tuning the ADC mux or wiriting address.
-+       * up to 4 different sensors from the same ADC that can be
-+       * switched by tuning the ADC mux or writing address.
-        *
-        * We set valid instead of volt as we don't enable valid/volt
-        * split reading and AHB read valid addr in such case.
diff --git a/target/linux/airoha/patches-6.6/051-v6.15-pinctrl-airoha-fix-wrong-PHY-LED-mapping-and-PHY2-LE.patch b/target/linux/airoha/patches-6.6/051-v6.15-pinctrl-airoha-fix-wrong-PHY-LED-mapping-and-PHY2-LE.patch
deleted file mode 100644 (file)
index 3865327..0000000
+++ /dev/null
@@ -1,435 +0,0 @@
-From 457d9772e8a5cdae64f66b5f7d5b0247365191ec Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 1 Apr 2025 15:50:21 +0200
-Subject: [PATCH] pinctrl: airoha: fix wrong PHY LED mapping and PHY2 LED
- defines
-
-The current PHY2 LED defines are wrong and actually set BITs outside the
-related mask. Fix them and set the correct values. While at it, also use
-the FIELD_PREP_CONST macro to make it easier to understand which values
-are actually applied to the mask.
-
-Also fix the wrong PHY LED mapping. The SoC switch supports up to 4 ports,
-but the registers define a mapping for 5 PHY ports, starting from 0. The
-mapping was wrongly defined starting from PHY1. Reorder the function
-groups to start from PHY0. PHY4 is actually never supported as we don't
-have a GPIO pin to assign.
-
-Cc: stable@vger.kernel.org
-Fixes: 1c8ace2d0725 ("pinctrl: airoha: Add support for EN7581 SoC")
-Reviewed-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/20250401135026.18018-1-ansuelsmth@gmail.com
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/pinctrl/mediatek/pinctrl-airoha.c | 159 ++++++++++------------
- 1 file changed, 70 insertions(+), 89 deletions(-)
-
---- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
-+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
-@@ -6,6 +6,7 @@
-  */
- #include <dt-bindings/pinctrl/mt65xx.h>
-+#include <linux/bitfield.h>
- #include <linux/bits.h>
- #include <linux/cleanup.h>
- #include <linux/gpio/driver.h>
-@@ -106,39 +107,19 @@
- #define REG_LAN_LED1_MAPPING                  0x0280
- #define LAN4_LED_MAPPING_MASK                 GENMASK(18, 16)
--#define LAN4_PHY4_LED_MAP                     BIT(18)
--#define LAN4_PHY2_LED_MAP                     BIT(17)
--#define LAN4_PHY1_LED_MAP                     BIT(16)
--#define LAN4_PHY0_LED_MAP                     0
--#define LAN4_PHY3_LED_MAP                     GENMASK(17, 16)
-+#define LAN4_PHY_LED_MAP(_n)                  FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n))
- #define LAN3_LED_MAPPING_MASK                 GENMASK(14, 12)
--#define LAN3_PHY4_LED_MAP                     BIT(14)
--#define LAN3_PHY2_LED_MAP                     BIT(13)
--#define LAN3_PHY1_LED_MAP                     BIT(12)
--#define LAN3_PHY0_LED_MAP                     0
--#define LAN3_PHY3_LED_MAP                     GENMASK(13, 12)
-+#define LAN3_PHY_LED_MAP(_n)                  FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n))
- #define LAN2_LED_MAPPING_MASK                 GENMASK(10, 8)
--#define LAN2_PHY4_LED_MAP                     BIT(12)
--#define LAN2_PHY2_LED_MAP                     BIT(11)
--#define LAN2_PHY1_LED_MAP                     BIT(10)
--#define LAN2_PHY0_LED_MAP                     0
--#define LAN2_PHY3_LED_MAP                     GENMASK(11, 10)
-+#define LAN2_PHY_LED_MAP(_n)                  FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n))
- #define LAN1_LED_MAPPING_MASK                 GENMASK(6, 4)
--#define LAN1_PHY4_LED_MAP                     BIT(6)
--#define LAN1_PHY2_LED_MAP                     BIT(5)
--#define LAN1_PHY1_LED_MAP                     BIT(4)
--#define LAN1_PHY0_LED_MAP                     0
--#define LAN1_PHY3_LED_MAP                     GENMASK(5, 4)
-+#define LAN1_PHY_LED_MAP(_n)                  FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n))
- #define LAN0_LED_MAPPING_MASK                 GENMASK(2, 0)
--#define LAN0_PHY4_LED_MAP                     BIT(3)
--#define LAN0_PHY2_LED_MAP                     BIT(2)
--#define LAN0_PHY1_LED_MAP                     BIT(1)
--#define LAN0_PHY0_LED_MAP                     0
--#define LAN0_PHY3_LED_MAP                     GENMASK(2, 1)
-+#define LAN0_PHY_LED_MAP(_n)                  FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n))
- /* CONF */
- #define REG_I2C_SDA_E2                                0x001c
-@@ -1470,8 +1451,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY1_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1485,8 +1466,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY1_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1500,8 +1481,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY1_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1515,8 +1496,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY1_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       },
-@@ -1534,8 +1515,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY2_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1549,8 +1530,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY2_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1564,8 +1545,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY2_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1579,8 +1560,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY2_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       },
-@@ -1598,8 +1579,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY3_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1613,8 +1594,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY3_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1628,8 +1609,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY3_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1643,8 +1624,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY3_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       },
-@@ -1662,8 +1643,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY4_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1677,8 +1658,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY4_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1692,8 +1673,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY4_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1707,8 +1688,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED0_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY4_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       },
-@@ -1726,8 +1707,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY1_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1741,8 +1722,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY1_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1756,8 +1737,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY1_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1771,8 +1752,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY1_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(0)
-               },
-               .regmap_size = 2,
-       },
-@@ -1790,8 +1771,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY2_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1805,8 +1786,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY2_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1820,8 +1801,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY2_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1835,8 +1816,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY2_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(1)
-               },
-               .regmap_size = 2,
-       },
-@@ -1854,8 +1835,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY3_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1869,8 +1850,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY3_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1884,8 +1865,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY3_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1899,8 +1880,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY3_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(2)
-               },
-               .regmap_size = 2,
-       },
-@@ -1918,8 +1899,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN1_LED_MAPPING_MASK,
--                      LAN1_PHY4_LED_MAP
-+                      LAN0_LED_MAPPING_MASK,
-+                      LAN0_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1933,8 +1914,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN2_LED_MAPPING_MASK,
--                      LAN2_PHY4_LED_MAP
-+                      LAN1_LED_MAPPING_MASK,
-+                      LAN1_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1948,8 +1929,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN3_LED_MAPPING_MASK,
--                      LAN3_PHY4_LED_MAP
-+                      LAN2_LED_MAPPING_MASK,
-+                      LAN2_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       }, {
-@@ -1963,8 +1944,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-                       REG_LAN_LED1_MAPPING,
--                      LAN4_LED_MAPPING_MASK,
--                      LAN4_PHY4_LED_MAP
-+                      LAN3_LED_MAPPING_MASK,
-+                      LAN3_PHY_LED_MAP(3)
-               },
-               .regmap_size = 2,
-       },
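For context on the FIELD_PREP_CONST() pattern that the dropped patch above switches to, here is a minimal standalone sketch, not kernel code: GENMASK() and FIELD_PREP() are re-implemented locally, and the printed values are purely illustrative of how a PHY index lands in a LAN LED mapping field such as GENMASK(18, 16).

#include <stdint.h>
#include <stdio.h>

/* local stand-ins for the kernel GENMASK()/FIELD_PREP() helpers */
#define GENMASK(h, l)       ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define LAN4_LED_MAPPING_MASK GENMASK(18, 16)
#define LAN4_PHY_LED_MAP(n)   FIELD_PREP(LAN4_LED_MAPPING_MASK, (n))

int main(void)
{
	/* map the LAN4 LED to PHY0..PHY3: the index lands in bits 18:16 */
	for (uint32_t phy = 0; phy < 4; phy++)
		printf("LAN4_PHY_LED_MAP(%u) = 0x%08x\n", phy,
		       LAN4_PHY_LED_MAP(phy));
	return 0;
}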
diff --git a/target/linux/airoha/patches-6.6/060-v6.16-02-net-phy-mediatek-add-Airoha-PHY-ID-to-SoC-driver.patch b/target/linux/airoha/patches-6.6/060-v6.16-02-net-phy-mediatek-add-Airoha-PHY-ID-to-SoC-driver.patch
deleted file mode 100644 (file)
index 68d21dd..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-From 6a325aed130bb68790e765f923e76ec5669d2da7 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 10 Apr 2025 12:04:04 +0200
-Subject: [PATCH 2/2] net: phy: mediatek: add Airoha PHY ID to SoC driver
-
-The Airoha AN7581 SoC ships with a switch based on the MT753x switch
-embedded in other SoCs like the MT7581 and the MT7988. Similar to these,
-it requires configuring some pins to enable the PHY LEDs.
-
-Add support for the PHY ID of the Airoha embedded switch and define a
-simple probe function to toggle these pins. Also fill in the LED functions
-and add a dedicated function to configure the LED polarity.
-
-Reviewed-by: Andrew Lunn <andrew@lunn.ch>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://patch.msgid.link/20250410100410.348-2-ansuelsmth@gmail.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/phy/mediatek/Kconfig      |  4 +-
- drivers/net/phy/mediatek/mtk-ge-soc.c | 62 +++++++++++++++++++++++++++
- 2 files changed, 65 insertions(+), 1 deletion(-)
-
---- a/drivers/net/phy/mediatek/Kconfig
-+++ b/drivers/net/phy/mediatek/Kconfig
-@@ -15,7 +15,9 @@ config MEDIATEK_GE_PHY
- config MEDIATEK_GE_SOC_PHY
-       tristate "MediaTek SoC Ethernet PHYs"
--      depends on (ARM64 && ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || COMPILE_TEST
-+      depends on ARM64 || COMPILE_TEST
-+      depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \
-+                 COMPILE_TEST
-       select MTK_NET_PHYLIB
-       help
-         Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
---- a/drivers/net/phy/mediatek/mtk-ge-soc.c
-+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
-@@ -10,8 +10,11 @@
- #include "mtk.h"
-+#define MTK_PHY_MAX_LEDS                      2
-+
- #define MTK_GPHY_ID_MT7981                    0x03a29461
- #define MTK_GPHY_ID_MT7988                    0x03a29481
-+#define MTK_GPHY_ID_AN7581                    0x03a294c1
- #define MTK_EXT_PAGE_ACCESS                   0x1f
- #define MTK_PHY_PAGE_STANDARD                 0x0000
-@@ -1405,6 +1408,53 @@ static int mt7981_phy_probe(struct phy_d
-       return mt798x_phy_calibration(phydev);
- }
-+static int an7581_phy_probe(struct phy_device *phydev)
-+{
-+      struct mtk_socphy_priv *priv;
-+      struct pinctrl *pinctrl;
-+
-+      /* Toggle pinctrl to enable PHY LED */
-+      pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led");
-+      if (IS_ERR(pinctrl))
-+              dev_err(&phydev->mdio.bus->dev,
-+                      "Failed to setup PHY LED pinctrl\n");
-+
-+      priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
-+      if (!priv)
-+              return -ENOMEM;
-+
-+      phydev->priv = priv;
-+
-+      return 0;
-+}
-+
-+static int an7581_phy_led_polarity_set(struct phy_device *phydev, int index,
-+                                     unsigned long modes)
-+{
-+      u32 mode;
-+      u16 val;
-+
-+      if (index >= MTK_PHY_MAX_LEDS)
-+              return -EINVAL;
-+
-+      for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
-+              switch (mode) {
-+              case PHY_LED_ACTIVE_LOW:
-+                      val = MTK_PHY_LED_ON_POLARITY;
-+                      break;
-+              case PHY_LED_ACTIVE_HIGH:
-+                      val = 0;
-+                      break;
-+              default:
-+                      return -EINVAL;
-+              }
-+      }
-+
-+      return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
-+                            MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
-+                            MTK_PHY_LED_ON_POLARITY, val);
-+}
-+
- static struct phy_driver mtk_socphy_driver[] = {
-       {
-               PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981),
-@@ -1440,6 +1490,17 @@ static struct phy_driver mtk_socphy_driv
-               .led_hw_control_set = mt798x_phy_led_hw_control_set,
-               .led_hw_control_get = mt798x_phy_led_hw_control_get,
-       },
-+      {
-+              PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581),
-+              .name           = "Airoha AN7581 PHY",
-+              .probe          = an7581_phy_probe,
-+              .led_blink_set  = mt798x_phy_led_blink_set,
-+              .led_brightness_set = mt798x_phy_led_brightness_set,
-+              .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
-+              .led_hw_control_set = mt798x_phy_led_hw_control_set,
-+              .led_hw_control_get = mt798x_phy_led_hw_control_get,
-+              .led_polarity_set = an7581_phy_led_polarity_set,
-+      },
- };
- module_phy_driver(mtk_socphy_driver);
-@@ -1447,6 +1508,7 @@ module_phy_driver(mtk_socphy_driver);
- static struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
-       { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
-       { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
-+      { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581) },
-       { }
- };
diff --git a/target/linux/airoha/patches-6.6/063-01-v6.15-net-airoha-Move-min-max-packet-len-configuration-in-.patch b/target/linux/airoha/patches-6.6/063-01-v6.15-net-airoha-Move-min-max-packet-len-configuration-in-.patch
deleted file mode 100644 (file)
index 03654f4..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-From 54d989d58d2ac87c8504c2306ba8b4957c60e8dc Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 15:21:08 +0100
-Subject: [PATCH 1/6] net: airoha: Move min/max packet len configuration in
- airoha_dev_open()
-
-In order to align the maximum allowed packet size with the configured MTU,
-move the REG_GDM_LEN_CFG configuration into the airoha_dev_open routine.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-1-283ebc61120e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 14 +++++++-------
- 1 file changed, 7 insertions(+), 7 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -138,15 +138,10 @@ static void airoha_fe_maccr_init(struct
- {
-       int p;
--      for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-+      for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
-               airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
-                             GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
-                             GDM_DROP_CRC_ERR);
--              airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
--                            GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
--                            FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
--                            FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
--      }
-       airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
-                     FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
-@@ -1521,9 +1516,9 @@ static void airoha_update_hw_stats(struc
- static int airoha_dev_open(struct net_device *dev)
- {
-+      int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
-       struct airoha_gdm_port *port = netdev_priv(dev);
-       struct airoha_qdma *qdma = port->qdma;
--      int err;
-       netif_tx_start_all_queues(dev);
-       err = airoha_set_vip_for_gdm_port(port, true);
-@@ -1537,6 +1532,11 @@ static int airoha_dev_open(struct net_de
-               airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-                               GDM_STAG_EN_MASK);
-+      airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
-+                    GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-+                    FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-+                    FIELD_PREP(GDM_LONG_LEN_MASK, len));
-+
-       airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
-                       GLOBAL_CFG_TX_DMA_EN_MASK |
-                       GLOBAL_CFG_RX_DMA_EN_MASK);
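The frame-length programming shown above derives the hardware limit from the MTU. A tiny standalone sketch of that arithmetic follows; ETH_HLEN and ETH_FCS_LEN are re-declared here only for illustration.

#include <stdio.h>

#define ETH_HLEN    14	/* Ethernet header */
#define ETH_FCS_LEN  4	/* frame check sequence */

int main(void)
{
	int mtus[] = { 1500, 9000 };

	/* value programmed into GDM_LONG_LEN_MASK for a given MTU */
	for (int i = 0; i < 2; i++)
		printf("mtu %d -> max frame len %d\n", mtus[i],
		       ETH_HLEN + mtus[i] + ETH_FCS_LEN);
	return 0;
}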
diff --git a/target/linux/airoha/patches-6.6/063-02-v6.15-net-airoha-Enable-Rx-Scatter-Gather.patch b/target/linux/airoha/patches-6.6/063-02-v6.15-net-airoha-Enable-Rx-Scatter-Gather.patch
deleted file mode 100644 (file)
index cea179c..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-From e12182ddb6e712951d21a50e2c8ccd700e41a40c Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 15:21:09 +0100
-Subject: [PATCH 2/6] net: airoha: Enable Rx Scatter-Gather
-
-The EN7581 SoC can receive 9k frames. Enable the reception of
-Scatter-Gather (SG) frames.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-2-283ebc61120e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 68 ++++++++++++++---------
- drivers/net/ethernet/airoha/airoha_eth.h  |  1 +
- drivers/net/ethernet/airoha/airoha_regs.h |  5 ++
- 3 files changed, 48 insertions(+), 26 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -615,10 +615,10 @@ static int airoha_qdma_rx_process(struct
-               struct airoha_qdma_desc *desc = &q->desc[q->tail];
-               u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
-               dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-+              struct page *page = virt_to_head_page(e->buf);
-               u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-               struct airoha_gdm_port *port;
--              struct sk_buff *skb;
--              int len, p;
-+              int data_len, len, p;
-               if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
-                       break;
-@@ -636,30 +636,41 @@ static int airoha_qdma_rx_process(struct
-               dma_sync_single_for_cpu(eth->dev, dma_addr,
-                                       SKB_WITH_OVERHEAD(q->buf_size), dir);
-+              data_len = q->skb ? q->buf_size
-+                                : SKB_WITH_OVERHEAD(q->buf_size);
-+              if (data_len < len)
-+                      goto free_frag;
-+
-               p = airoha_qdma_get_gdm_port(eth, desc);
--              if (p < 0 || !eth->ports[p]) {
--                      page_pool_put_full_page(q->page_pool,
--                                              virt_to_head_page(e->buf),
--                                              true);
--                      continue;
--              }
-+              if (p < 0 || !eth->ports[p])
-+                      goto free_frag;
-               port = eth->ports[p];
--              skb = napi_build_skb(e->buf, q->buf_size);
--              if (!skb) {
--                      page_pool_put_full_page(q->page_pool,
--                                              virt_to_head_page(e->buf),
--                                              true);
--                      break;
-+              if (!q->skb) { /* first buffer */
-+                      q->skb = napi_build_skb(e->buf, q->buf_size);
-+                      if (!q->skb)
-+                              goto free_frag;
-+
-+                      __skb_put(q->skb, len);
-+                      skb_mark_for_recycle(q->skb);
-+                      q->skb->dev = port->dev;
-+                      q->skb->protocol = eth_type_trans(q->skb, port->dev);
-+                      q->skb->ip_summed = CHECKSUM_UNNECESSARY;
-+                      skb_record_rx_queue(q->skb, qid);
-+              } else { /* scattered frame */
-+                      struct skb_shared_info *shinfo = skb_shinfo(q->skb);
-+                      int nr_frags = shinfo->nr_frags;
-+
-+                      if (nr_frags >= ARRAY_SIZE(shinfo->frags))
-+                              goto free_frag;
-+
-+                      skb_add_rx_frag(q->skb, nr_frags, page,
-+                                      e->buf - page_address(page), len,
-+                                      q->buf_size);
-               }
--              skb_reserve(skb, 2);
--              __skb_put(skb, len);
--              skb_mark_for_recycle(skb);
--              skb->dev = port->dev;
--              skb->protocol = eth_type_trans(skb, skb->dev);
--              skb->ip_summed = CHECKSUM_UNNECESSARY;
--              skb_record_rx_queue(skb, qid);
-+              if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
-+                      continue;
-               if (netdev_uses_dsa(port->dev)) {
-                       /* PPE module requires untagged packets to work
-@@ -672,22 +683,27 @@ static int airoha_qdma_rx_process(struct
-                       if (sptag < ARRAY_SIZE(port->dsa_meta) &&
-                           port->dsa_meta[sptag])
--                              skb_dst_set_noref(skb,
-+                              skb_dst_set_noref(q->skb,
-                                                 &port->dsa_meta[sptag]->dst);
-               }
-               hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
-               if (hash != AIROHA_RXD4_FOE_ENTRY)
--                      skb_set_hash(skb, jhash_1word(hash, 0),
-+                      skb_set_hash(q->skb, jhash_1word(hash, 0),
-                                    PKT_HASH_TYPE_L4);
-               reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
-               if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-                       airoha_ppe_check_skb(eth->ppe, hash);
--              napi_gro_receive(&q->napi, skb);
--
-               done++;
-+              napi_gro_receive(&q->napi, q->skb);
-+              q->skb = NULL;
-+              continue;
-+free_frag:
-+              page_pool_put_full_page(q->page_pool, page, true);
-+              dev_kfree_skb(q->skb);
-+              q->skb = NULL;
-       }
-       airoha_qdma_fill_rx_queue(q);
-@@ -763,6 +779,7 @@ static int airoha_qdma_init_rx_queue(str
-                       FIELD_PREP(RX_RING_THR_MASK, thr));
-       airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-                       FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
-+      airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
-       airoha_qdma_fill_rx_queue(q);
-@@ -1162,7 +1179,6 @@ static int airoha_qdma_hw_init(struct ai
-       }
-       airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
--                     GLOBAL_CFG_RX_2B_OFFSET_MASK |
-                      FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
-                      GLOBAL_CFG_CPU_TXR_RR_MASK |
-                      GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -176,6 +176,7 @@ struct airoha_queue {
-       struct napi_struct napi;
-       struct page_pool *page_pool;
-+      struct sk_buff *skb;
- };
- struct airoha_tx_irq_queue {
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -626,10 +626,15 @@
- #define REG_RX_DELAY_INT_IDX(_n)      \
-       (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-+#define REG_RX_SCATTER_CFG(_n)        \
-+      (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
-+
- #define RX_DELAY_INT_MASK             GENMASK(15, 0)
- #define RX_RING_DMA_IDX_MASK          GENMASK(15, 0)
-+#define RX_RING_SG_EN_MASK            BIT(0)
-+
- #define REG_INGRESS_TRTCM_CFG         0x0070
- #define INGRESS_TRTCM_EN_MASK         BIT(31)
- #define INGRESS_TRTCM_MODE_MASK               BIT(30)
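As a rough model of the scatter-gather receive path introduced above, buffers are accumulated into one frame until a descriptor arrives without the "more" flag. The descriptor layout and sizes below are invented for illustration and are not the driver's data structures.

#include <stdbool.h>
#include <stdio.h>

struct rx_desc {		/* illustrative descriptor, not the real layout */
	int  len;
	bool more;		/* set on all but the last buffer of a frame */
};

int main(void)
{
	/* a 9k frame split over three buffers, followed by a small frame */
	struct rx_desc ring[] = {
		{ 4096, true }, { 4096, true }, { 1024, false }, { 64, false },
	};
	int frame_len = 0;

	for (unsigned int i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		frame_len += ring[i].len;	/* first buffer or extra frag */
		if (ring[i].more)
			continue;		/* wait for the next fragment */
		printf("deliver frame of %d bytes\n", frame_len);
		frame_len = 0;
	}
	return 0;
}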
diff --git a/target/linux/airoha/patches-6.6/063-03-v6.15-net-airoha-Introduce-airoha_dev_change_mtu-callback.patch b/target/linux/airoha/patches-6.6/063-03-v6.15-net-airoha-Introduce-airoha_dev_change_mtu-callback.patch
deleted file mode 100644 (file)
index 2a4aa08..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-From 03b1b69f0662c46f258a45e4a7d7837351c11692 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 15:21:10 +0100
-Subject: [PATCH 3/6] net: airoha: Introduce airoha_dev_change_mtu callback
-
-Add airoha_dev_change_mtu callback to update the MTU of a running
-device.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-3-283ebc61120e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 15 +++++++++++++++
- 1 file changed, 15 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1706,6 +1706,20 @@ static void airoha_dev_get_stats64(struc
-       } while (u64_stats_fetch_retry(&port->stats.syncp, start));
- }
-+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
-+{
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->qdma->eth;
-+      u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
-+
-+      airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
-+                    GDM_LONG_LEN_MASK,
-+                    FIELD_PREP(GDM_LONG_LEN_MASK, len));
-+      WRITE_ONCE(dev->mtu, mtu);
-+
-+      return 0;
-+}
-+
- static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                  struct net_device *sb_dev)
- {
-@@ -2398,6 +2412,7 @@ static const struct net_device_ops airoh
-       .ndo_init               = airoha_dev_init,
-       .ndo_open               = airoha_dev_open,
-       .ndo_stop               = airoha_dev_stop,
-+      .ndo_change_mtu         = airoha_dev_change_mtu,
-       .ndo_select_queue       = airoha_dev_select_queue,
-       .ndo_start_xmit         = airoha_dev_xmit,
-       .ndo_get_stats64        = airoha_dev_get_stats64,
diff --git a/target/linux/airoha/patches-6.6/063-04-v6.15-net-airoha-Increase-max-mtu-to-9k.patch b/target/linux/airoha/patches-6.6/063-04-v6.15-net-airoha-Increase-max-mtu-to-9k.patch
deleted file mode 100644 (file)
index 8771ff2..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-From 168ef0c1dee83c401896a0bca680e9f97b1ebd64 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 15:21:11 +0100
-Subject: [PATCH 4/6] net: airoha: Increase max mtu to 9k
-
-The EN7581 SoC supports a maximum MTU of 9k.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-4-283ebc61120e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -20,7 +20,7 @@
- #define AIROHA_MAX_DSA_PORTS          7
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
--#define AIROHA_MAX_MTU                        2000
-+#define AIROHA_MAX_MTU                        9216
- #define AIROHA_MAX_PACKET_SIZE                2048
- #define AIROHA_NUM_QOS_CHANNELS               4
- #define AIROHA_NUM_QOS_QUEUES         8
diff --git a/target/linux/airoha/patches-6.6/063-05-v6.15-net-airoha-Fix-lan4-support-in-airoha_qdma_get_gdm_p.patch b/target/linux/airoha/patches-6.6/063-05-v6.15-net-airoha-Fix-lan4-support-in-airoha_qdma_get_gdm_p.patch
deleted file mode 100644 (file)
index 1c3030a..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-From 35ea4f06fd33fc32f556a0c26d1d8340497fa7f8 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 15:38:05 +0100
-Subject: [PATCH 5/6] net: airoha: Fix lan4 support in
- airoha_qdma_get_gdm_port()
-
-The EN7581 SoC supports lan{1,4} ports on the MT7530 DSA switch. Fix the
-value reported for lan4 in the airoha_qdma_get_gdm_port routine.
-
-Fixes: 23020f0493270 ("net: airoha: Introduce ethernet support for EN7581 SoC")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-airoha-eth-fix-lan4-v1-1-832417da4bb5@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -589,7 +589,7 @@ static int airoha_qdma_get_gdm_port(stru
-       sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
-       switch (sport) {
--      case 0x10 ... 0x13:
-+      case 0x10 ... 0x14:
-               port = 0;
-               break;
-       case 0x2 ... 0x4:
diff --git a/target/linux/airoha/patches-6.6/063-06-v6.15-net-airoha-Enable-TSO-Scatter-Gather-for-LAN-port.patch b/target/linux/airoha/patches-6.6/063-06-v6.15-net-airoha-Enable-TSO-Scatter-Gather-for-LAN-port.patch
deleted file mode 100644 (file)
index 28a85e6..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-From a202dfe31cae2f2120297a7142385d80a5577d42 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 4 Mar 2025 16:46:40 +0100
-Subject: [PATCH 6/6] net: airoha: Enable TSO/Scatter Gather for LAN port
-
-Set net_device vlan_features in order to enable TSO and Scatter Gather
-for DSA user ports.
-
-Reviewed-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250304-lan-enable-tso-v1-1-b398eb9976ba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2503,6 +2503,7 @@ static int airoha_alloc_gdm_port(struct
-                          NETIF_F_SG | NETIF_F_TSO |
-                          NETIF_F_HW_TC;
-       dev->features |= dev->hw_features;
-+      dev->vlan_features = dev->hw_features;
-       dev->dev.of_node = np;
-       dev->irq = qdma->irq;
-       SET_NETDEV_DEV(dev, eth->dev);
diff --git a/target/linux/airoha/patches-6.6/064-v6.15-net-airoha-Fix-dev-dsa_ptr-check-in-airoha_get_dsa_t.patch b/target/linux/airoha/patches-6.6/064-v6.15-net-airoha-Fix-dev-dsa_ptr-check-in-airoha_get_dsa_t.patch
deleted file mode 100644 (file)
index 7134de9..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-From e368d2a1e8b6f0926e4e76a56b484249905192f5 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 6 Mar 2025 11:52:20 +0100
-Subject: [PATCH] net: airoha: Fix dev->dsa_ptr check in airoha_get_dsa_tag()
-
-Fix the following warning reported by Smatch static checker in
-airoha_get_dsa_tag routine:
-
-drivers/net/ethernet/airoha/airoha_eth.c:1722 airoha_get_dsa_tag()
-warn: 'dp' isn't an ERR_PTR
-
-dev->dsa_ptr can't be set to an error pointer; it can only be NULL.
-Remove this check since it is already performed in netdev_uses_dsa().
-
-Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
-Closes: https://lore.kernel.org/netdev/Z8l3E0lGOcrel07C@lore-desk/T/#m54adc113fcdd8c5e6c5f65ffd60d8e8b1d483d90
-Fixes: af3cf757d5c9 ("net: airoha: Move DSA tag in DMA descriptor")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250306-airoha-flowtable-fixes-v1-1-68d3c1296cdd@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 7 +------
- 1 file changed, 1 insertion(+), 6 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1742,18 +1742,13 @@ static u32 airoha_get_dsa_tag(struct sk_
- {
- #if IS_ENABLED(CONFIG_NET_DSA)
-       struct ethhdr *ehdr;
--      struct dsa_port *dp;
-       u8 xmit_tpid;
-       u16 tag;
-       if (!netdev_uses_dsa(dev))
-               return 0;
--      dp = dev->dsa_ptr;
--      if (IS_ERR(dp))
--              return 0;
--
--      if (dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
-+      if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
-               return 0;
-       if (skb_cow_head(skb, 0))
diff --git a/target/linux/airoha/patches-6.6/065-v6.15-net-airoha-fix-CONFIG_DEBUG_FS-check.patch b/target/linux/airoha/patches-6.6/065-v6.15-net-airoha-fix-CONFIG_DEBUG_FS-check.patch
deleted file mode 100644 (file)
index a846740..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-From 08d0185e36ad8bb5902a73711bf114765d282161 Mon Sep 17 00:00:00 2001
-From: Arnd Bergmann <arnd@arndb.de>
-Date: Fri, 14 Mar 2025 16:49:59 +0100
-Subject: [PATCH] net: airoha: fix CONFIG_DEBUG_FS check
-
-The #if check causes a build failure when CONFIG_DEBUG_FS is turned
-off:
-
-In file included from drivers/net/ethernet/airoha/airoha_eth.c:17:
-drivers/net/ethernet/airoha/airoha_eth.h:543:5: error: "CONFIG_DEBUG_FS" is not defined, evaluates to 0 [-Werror=undef]
-  543 | #if CONFIG_DEBUG_FS
-      |     ^~~~~~~~~~~~~~~
-
-Replace it with the correct #ifdef.
-
-Fixes: 3fe15c640f38 ("net: airoha: Introduce PPE debugfs support")
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250314155009.4114308-1-arnd@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -540,7 +540,7 @@ void airoha_ppe_deinit(struct airoha_eth
- struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-                                                 u32 hash);
--#if CONFIG_DEBUG_FS
-+#ifdef CONFIG_DEBUG_FS
- int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
- #else
- static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
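A minimal standalone illustration of why the patch above swaps #if for #ifdef: with -Werror=undef, "#if FEATURE_X" fails when the macro is not defined at all, while "#ifdef FEATURE_X" only tests whether it is defined. FEATURE_X stands in for CONFIG_DEBUG_FS and is purely illustrative.

#include <stdio.h>

/* "#if FEATURE_X" would trigger -Werror=undef here, since FEATURE_X is
 * never defined in this translation unit; "#ifdef" is safe either way. */
#ifdef FEATURE_X
static void feature_init(void) { puts("feature enabled"); }
#else
static void feature_init(void) { puts("feature disabled"); }
#endif

int main(void)
{
	feature_init();
	return 0;
}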
diff --git a/target/linux/airoha/patches-6.6/066-01-v6.15-net-airoha-Fix-qid-report-in-airoha_tc_get_htb_get_l.patch b/target/linux/airoha/patches-6.6/066-01-v6.15-net-airoha-Fix-qid-report-in-airoha_tc_get_htb_get_l.patch
deleted file mode 100644 (file)
index 0a815c1..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-From 57b290d97c6150774bf929117ca737a26d8fc33d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 31 Mar 2025 08:52:53 +0200
-Subject: [PATCH 1/2] net: airoha: Fix qid report in
- airoha_tc_get_htb_get_leaf_queue()
-
-Fix the following kernel warning deleting HTB offloaded leafs and/or root
-HTB qdisc in airoha_eth driver properly reporting qid in
-airoha_tc_get_htb_get_leaf_queue routine.
-
-$tc qdisc replace dev eth1 root handle 10: htb offload
-$tc class add dev eth1 parent 10: classid 10:4 htb rate 100mbit ceil 100mbit
-$tc qdisc replace dev eth1 parent 10:4 handle 4: ets bands 8 \
- quanta 1514 3028 4542 6056 7570 9084 10598 12112
-$tc qdisc del dev eth1 root
-
-[   55.827864] ------------[ cut here ]------------
-[   55.832493] WARNING: CPU: 3 PID: 2678 at 0xffffffc0798695a4
-[   55.956510] CPU: 3 PID: 2678 Comm: tc Tainted: G           O 6.6.71 #0
-[   55.963557] Hardware name: Airoha AN7581 Evaluation Board (DT)
-[   55.969383] pstate: 20400005 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
-[   55.976344] pc : 0xffffffc0798695a4
-[   55.979851] lr : 0xffffffc079869a20
-[   55.983358] sp : ffffffc0850536a0
-[   55.986665] x29: ffffffc0850536a0 x28: 0000000000000024 x27: 0000000000000001
-[   55.993800] x26: 0000000000000000 x25: ffffff8008b19000 x24: ffffff800222e800
-[   56.000935] x23: 0000000000000001 x22: 0000000000000000 x21: ffffff8008b19000
-[   56.008071] x20: ffffff8002225800 x19: ffffff800379d000 x18: 0000000000000000
-[   56.015206] x17: ffffffbf9ea59000 x16: ffffffc080018000 x15: 0000000000000000
-[   56.022342] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000001
-[   56.029478] x11: ffffffc081471008 x10: ffffffc081575a98 x9 : 0000000000000000
-[   56.036614] x8 : ffffffc08167fd40 x7 : ffffffc08069e104 x6 : ffffff8007f86000
-[   56.043748] x5 : 0000000000000000 x4 : 0000000000000000 x3 : 0000000000000001
-[   56.050884] x2 : 0000000000000000 x1 : 0000000000000250 x0 : ffffff800222c000
-[   56.058020] Call trace:
-[   56.060459]  0xffffffc0798695a4
-[   56.063618]  0xffffffc079869a20
-[   56.066777]  __qdisc_destroy+0x40/0xa0
-[   56.070528]  qdisc_put+0x54/0x6c
-[   56.073748]  qdisc_graft+0x41c/0x648
-[   56.077324]  tc_get_qdisc+0x168/0x2f8
-[   56.080978]  rtnetlink_rcv_msg+0x230/0x330
-[   56.085076]  netlink_rcv_skb+0x5c/0x128
-[   56.088913]  rtnetlink_rcv+0x14/0x1c
-[   56.092490]  netlink_unicast+0x1e0/0x2c8
-[   56.096413]  netlink_sendmsg+0x198/0x3c8
-[   56.100337]  ____sys_sendmsg+0x1c4/0x274
-[   56.104261]  ___sys_sendmsg+0x7c/0xc0
-[   56.107924]  __sys_sendmsg+0x44/0x98
-[   56.111492]  __arm64_sys_sendmsg+0x20/0x28
-[   56.115580]  invoke_syscall.constprop.0+0x58/0xfc
-[   56.120285]  do_el0_svc+0x3c/0xbc
-[   56.123592]  el0_svc+0x18/0x4c
-[   56.126647]  el0t_64_sync_handler+0x118/0x124
-[   56.131005]  el0t_64_sync+0x150/0x154
-[   56.134660] ---[ end trace 0000000000000000 ]---
-
-Fixes: ef1ca9271313b ("net: airoha: Add sched HTB offload support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Acked-by: Paolo Abeni <pabeni@redhat.com>
-Link: https://patch.msgid.link/20250331-airoha-htb-qdisc-offload-del-fix-v1-1-4ea429c2c968@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2356,7 +2356,7 @@ static int airoha_tc_get_htb_get_leaf_qu
-               return -EINVAL;
-       }
--      opt->qid = channel;
-+      opt->qid = AIROHA_NUM_TX_RING + channel;
-       return 0;
- }
diff --git a/target/linux/airoha/patches-6.6/066-02-v6.15-net-airoha-Fix-ETS-priomap-validation.patch b/target/linux/airoha/patches-6.6/066-02-v6.15-net-airoha-Fix-ETS-priomap-validation.patch
deleted file mode 100644 (file)
index 118047e..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-From 367579274f60cb23c570ae5348966ab51e1509a4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 31 Mar 2025 18:17:31 +0200
-Subject: [PATCH 2/2] net: airoha: Fix ETS priomap validation
-
-The ETS Qdisc schedules SP bands in a priority order that assigns band-0
-the highest priority (band-0 > band-1 > .. > band-n), while EN7581
-arranges SP bands in a priority order that assigns band-7 the highest
-priority (band-7 > band-6 > .. > band-n).
-Fix the priomap check in the airoha_qdma_set_tx_ets_sched routine in order
-to align the ETS Qdisc and airoha_eth driver SP priority ordering.
-
-Fixes: b56e4d660a96 ("net: airoha: Enforce ETS Qdisc priomap")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Reviewed-by: Davide Caratti <dcaratti@redhat.com>
-Link: https://patch.msgid.link/20250331-airoha-ets-validate-priomap-v1-1-60a524488672@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2029,7 +2029,7 @@ static int airoha_qdma_set_tx_ets_sched(
-       struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
-       enum tx_sched_mode mode = TC_SCH_SP;
-       u16 w[AIROHA_NUM_QOS_QUEUES] = {};
--      int i, nstrict = 0, nwrr, qidx;
-+      int i, nstrict = 0;
-       if (p->bands > AIROHA_NUM_QOS_QUEUES)
-               return -EINVAL;
-@@ -2047,17 +2047,17 @@ static int airoha_qdma_set_tx_ets_sched(
-        * lowest priorities with respect to SP ones.
-        * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
-        */
--      nwrr = p->bands - nstrict;
--      qidx = nstrict && nwrr ? nstrict : 0;
--      for (i = 1; i <= p->bands; i++) {
--              if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
-+      for (i = 0; i < nstrict; i++) {
-+              if (p->priomap[p->bands - i - 1] != i)
-                       return -EINVAL;
--
--              qidx = i == nwrr ? 0 : qidx + 1;
-       }
--      for (i = 0; i < nwrr; i++)
-+      for (i = 0; i < p->bands - nstrict; i++) {
-+              if (p->priomap[i] != nstrict + i)
-+                      return -EINVAL;
-+
-               w[i] = p->weights[nstrict + i];
-+      }
-       if (!nstrict)
-               mode = TC_SCH_WRR8;
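To make the expected band ordering concrete, here is a standalone sketch mirroring the priomap check added above: WRR bands come first and strict (SP) bands last, with the highest hardware priority at the end. The array sizes and the example priomap are illustrative; this is not the driver code.

#include <stdbool.h>
#include <stdio.h>

static bool ets_priomap_is_valid(const int *priomap, int bands, int nstrict)
{
	/* strict (SP) bands must occupy the tail of the priomap */
	for (int i = 0; i < nstrict; i++)
		if (priomap[bands - i - 1] != i)
			return false;

	/* WRR bands fill the remaining lower-priority slots in order */
	for (int i = 0; i < bands - nstrict; i++)
		if (priomap[i] != nstrict + i)
			return false;

	return true;
}

int main(void)
{
	int priomap[] = { 2, 3, 1, 0 };	/* 2 WRR bands followed by 2 SP bands */

	printf("priomap valid: %s\n",
	       ets_priomap_is_valid(priomap, 4, 2) ? "yes" : "no");
	return 0;
}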
diff --git a/target/linux/airoha/patches-6.6/067-v6.15-net-airoha-Validate-egress-gdm-port-in-airoha_ppe_fo.patch b/target/linux/airoha/patches-6.6/067-v6.15-net-airoha-Validate-egress-gdm-port-in-airoha_ppe_fo.patch
deleted file mode 100644 (file)
index c6ddbde..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-From 09bccf56db36501ccb1935d921dc24451e9f57dd Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 1 Apr 2025 11:42:30 +0200
-Subject: [PATCH] net: airoha: Validate egress gdm port in
- airoha_ppe_foe_entry_prepare()
-
-The dev pointer in the airoha_ppe_foe_entry_prepare routine is not strictly
-a device allocated by the airoha_eth driver, since it is an egress device
-and the flowtable can also contain wlan, pppoe or vlan devices. E.g:
-
-flowtable ft {
-        hook ingress priority filter
-        devices = { eth1, lan1, lan2, lan3, lan4, wlan0 }
-        flags offload                               ^
-                                                    |
-                     "not allocated by airoha_eth" --
-}
-
-In this case airoha_get_dsa_port() will just return the original device
-pointer and we can't assume the netdev priv pointer points to an
-airoha_gdm_port struct.
-Fix the issue by validating the egress gdm port in the
-airoha_ppe_foe_entry_prepare routine before accessing the net_device priv pointer.
-
-Fixes: 00a7678310fe ("net: airoha: Introduce flowtable offload support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250401-airoha-validate-egress-gdm-port-v4-1-c7315d33ce10@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 13 +++++++++++++
- drivers/net/ethernet/airoha/airoha_eth.h |  3 +++
- drivers/net/ethernet/airoha/airoha_ppe.c |  8 ++++++--
- 3 files changed, 22 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2452,6 +2452,19 @@ static void airoha_metadata_dst_free(str
-       }
- }
-+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
-+                            struct airoha_gdm_port *port)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-+              if (eth->ports[i] == port)
-+                      return true;
-+      }
-+
-+      return false;
-+}
-+
- static int airoha_alloc_gdm_port(struct airoha_eth *eth,
-                                struct device_node *np, int index)
- {
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -532,6 +532,9 @@ u32 airoha_rmw(void __iomem *base, u32 o
- #define airoha_qdma_clear(qdma, offset, val)                  \
-       airoha_rmw((qdma)->regs, (offset), (val), 0)
-+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
-+                            struct airoha_gdm_port *port);
-+
- void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
- int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                                void *cb_priv);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -197,7 +197,8 @@ static int airoha_get_dsa_port(struct ne
- #endif
- }
--static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
-+static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
-+                                      struct airoha_foe_entry *hwe,
-                                       struct net_device *dev, int type,
-                                       struct airoha_flow_data *data,
-                                       int l4proto)
-@@ -225,6 +226,9 @@ static int airoha_ppe_foe_entry_prepare(
-               struct airoha_gdm_port *port = netdev_priv(dev);
-               u8 pse_port;
-+              if (!airoha_is_valid_gdm_port(eth, port))
-+                      return -EINVAL;
-+
-               if (dsa_port >= 0)
-                       pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-               else
-@@ -633,7 +637,7 @@ static int airoha_ppe_flow_offload_repla
-           !is_valid_ether_addr(data.eth.h_dest))
-               return -EINVAL;
--      err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
-+      err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
-                                          &data, l4proto);
-       if (err)
-               return err;
diff --git a/target/linux/airoha/patches-6.6/068-01-v6.16-net-airoha-Add-l2_flows-rhashtable.patch b/target/linux/airoha/patches-6.6/068-01-v6.16-net-airoha-Add-l2_flows-rhashtable.patch
deleted file mode 100644 (file)
index 95f83f5..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-From b4916f67902e2ae1dc8e37dfa45e8894ad2f8921 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 9 Apr 2025 11:47:14 +0200
-Subject: [PATCH 1/2] net: airoha: Add l2_flows rhashtable
-
-Introduce an l2_flows rhashtable in the airoha_ppe struct in order to
-store L2 flows committed by upper layers of the kernel. This is a
-preliminary patch for offloading L2 traffic rules.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
-Link: https://patch.msgid.link/20250409-airoha-flowtable-l2b-v2-1-4a1e3935ea92@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.h |  15 +++-
- drivers/net/ethernet/airoha/airoha_ppe.c | 103 ++++++++++++++++++-----
- 2 files changed, 98 insertions(+), 20 deletions(-)
-
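A minimal, self-contained sketch of the rhashtable insert-or-replace pattern the deleted patch below relies on, keyed on the concatenated {dest, src} MAC pair (2 * ETH_ALEN bytes). The demo_* names are illustrative only and do not exist in the driver.

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/rhashtable.h>

struct demo_l2_key {
        u8 dest[ETH_ALEN];
        u8 src[ETH_ALEN];
};

struct demo_l2_flow {
        struct rhash_head node;         /* hash-table linkage */
        struct demo_l2_key key;         /* lookup key: dest + src MAC */
};

static const struct rhashtable_params demo_l2_params = {
        .head_offset = offsetof(struct demo_l2_flow, node),
        .key_offset = offsetof(struct demo_l2_flow, key),
        .key_len = sizeof(struct demo_l2_key),  /* 2 * ETH_ALEN */
        .automatic_shrinking = true,
};

/* Insert e, or take over an existing entry with the same MAC pair. */
static int demo_l2_flow_commit(struct rhashtable *ht, struct demo_l2_flow *e)
{
        struct demo_l2_flow *prev;

        prev = rhashtable_lookup_get_insert_fast(ht, &e->node, demo_l2_params);
        if (!prev)
                return 0;               /* no duplicate, inserted */
        if (IS_ERR(prev))
                return PTR_ERR(prev);   /* table error */

        return rhashtable_replace_fast(ht, &prev->node, &e->node,
                                       demo_l2_params);
}

The table itself is created with rhashtable_init(&ht, &demo_l2_params) and torn down with rhashtable_destroy(&ht), mirroring the init/deinit error paths added below.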
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -422,12 +422,23 @@ struct airoha_flow_data {
-       } pppoe;
- };
-+enum airoha_flow_entry_type {
-+      FLOW_TYPE_L4,
-+      FLOW_TYPE_L2,
-+      FLOW_TYPE_L2_SUBFLOW,
-+};
-+
- struct airoha_flow_table_entry {
--      struct hlist_node list;
-+      union {
-+              struct hlist_node list; /* PPE L3 flow entry */
-+              struct rhash_head l2_node; /* L2 flow entry */
-+      };
-       struct airoha_foe_entry data;
-       u32 hash;
-+      enum airoha_flow_entry_type type;
-+
-       struct rhash_head node;
-       unsigned long cookie;
- };
-@@ -480,6 +491,8 @@ struct airoha_ppe {
-       void *foe;
-       dma_addr_t foe_dma;
-+      struct rhashtable l2_flows;
-+
-       struct hlist_head *foe_flow;
-       u16 foe_check_time[PPE_NUM_ENTRIES];
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -24,6 +24,13 @@ static const struct rhashtable_params ai
-       .automatic_shrinking = true,
- };
-+static const struct rhashtable_params airoha_l2_flow_table_params = {
-+      .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
-+      .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
-+      .key_len = 2 * ETH_ALEN,
-+      .automatic_shrinking = true,
-+};
-+
- static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
- {
-       return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
-@@ -476,6 +483,43 @@ static int airoha_ppe_foe_commit_entry(s
-       return 0;
- }
-+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
-+                                     struct airoha_flow_table_entry *e)
-+{
-+      lockdep_assert_held(&ppe_lock);
-+
-+      hlist_del_init(&e->list);
-+      if (e->hash != 0xffff) {
-+              e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
-+              e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
-+                                        AIROHA_FOE_STATE_INVALID);
-+              airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
-+              e->hash = 0xffff;
-+      }
-+}
-+
-+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
-+                                        struct airoha_flow_table_entry *e)
-+{
-+      lockdep_assert_held(&ppe_lock);
-+
-+      rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
-+                             airoha_l2_flow_table_params);
-+}
-+
-+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
-+                                           struct airoha_flow_table_entry *e)
-+{
-+      spin_lock_bh(&ppe_lock);
-+
-+      if (e->type == FLOW_TYPE_L2)
-+              airoha_ppe_foe_remove_l2_flow(ppe, e);
-+      else
-+              airoha_ppe_foe_remove_flow(ppe, e);
-+
-+      spin_unlock_bh(&ppe_lock);
-+}
-+
- static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
- {
-       struct airoha_flow_table_entry *e;
-@@ -505,11 +549,37 @@ unlock:
-       spin_unlock_bh(&ppe_lock);
- }
-+static int
-+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
-+                                  struct airoha_flow_table_entry *e)
-+{
-+      struct airoha_flow_table_entry *prev;
-+
-+      e->type = FLOW_TYPE_L2;
-+      prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
-+                                               airoha_l2_flow_table_params);
-+      if (!prev)
-+              return 0;
-+
-+      if (IS_ERR(prev))
-+              return PTR_ERR(prev);
-+
-+      return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
-+                                     &e->l2_node,
-+                                     airoha_l2_flow_table_params);
-+}
-+
- static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
-                                           struct airoha_flow_table_entry *e)
- {
--      u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
-+      u32 hash;
-+      if (type == PPE_PKT_TYPE_BRIDGE)
-+              return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
-+
-+      hash = airoha_ppe_foe_get_entry_hash(&e->data);
-+      e->type = FLOW_TYPE_L4;
-       e->hash = 0xffff;
-       spin_lock_bh(&ppe_lock);
-@@ -519,23 +589,6 @@ static int airoha_ppe_foe_flow_commit_en
-       return 0;
- }
--static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
--                                           struct airoha_flow_table_entry *e)
--{
--      spin_lock_bh(&ppe_lock);
--
--      hlist_del_init(&e->list);
--      if (e->hash != 0xffff) {
--              e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
--              e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
--                                        AIROHA_FOE_STATE_INVALID);
--              airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
--              e->hash = 0xffff;
--      }
--
--      spin_unlock_bh(&ppe_lock);
--}
--
- static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
-                                          struct flow_cls_offload *f)
- {
-@@ -890,9 +943,20 @@ int airoha_ppe_init(struct airoha_eth *e
-       if (err)
-               return err;
-+      err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
-+      if (err)
-+              goto error_flow_table_destroy;
-+
-       err = airoha_ppe_debugfs_init(ppe);
-       if (err)
--              rhashtable_destroy(&eth->flow_table);
-+              goto error_l2_flow_table_destroy;
-+
-+      return 0;
-+
-+error_l2_flow_table_destroy:
-+      rhashtable_destroy(&ppe->l2_flows);
-+error_flow_table_destroy:
-+      rhashtable_destroy(&eth->flow_table);
-       return err;
- }
-@@ -909,6 +973,7 @@ void airoha_ppe_deinit(struct airoha_eth
-       }
-       rcu_read_unlock();
-+      rhashtable_destroy(&eth->ppe->l2_flows);
-       rhashtable_destroy(&eth->flow_table);
-       debugfs_remove(eth->ppe->debugfs_dir);
- }
diff --git a/target/linux/airoha/patches-6.6/068-02-v6.16-net-airoha-Add-L2-hw-acceleration-support.patch b/target/linux/airoha/patches-6.6/068-02-v6.16-net-airoha-Add-L2-hw-acceleration-support.patch
deleted file mode 100644 (file)
index 2375962..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-From cd53f622611f9a6dd83b858c85448dd3568b67ec Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 9 Apr 2025 11:47:15 +0200
-Subject: [PATCH 2/2] net: airoha: Add L2 hw acceleration support
-
-Similar to mtk driver, introduce the capability to offload L2 traffic
-defining flower rules in the PSE/PPE engine available on EN7581 SoC.
-Since the hw always reports L2/L3/L4 flower rules, link all L2 rules
-sharing the same L2 info (with different L3/L4 info) in the L2 subflows
-list of a given L2 PPE entry.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
-Link: https://patch.msgid.link/20250409-airoha-flowtable-l2b-v2-2-4a1e3935ea92@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c |   2 +-
- drivers/net/ethernet/airoha/airoha_eth.h |   9 +-
- drivers/net/ethernet/airoha/airoha_ppe.c | 121 ++++++++++++++++++++---
- 3 files changed, 115 insertions(+), 17 deletions(-)
-
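Because the hardware always reports full L2/L3/L4 entries, the deleted patch below keeps one parent entry per offloaded L2 rule and strings every hash-bound binding off it, so dropping the rule invalidates all bindings that share its L2 info. A hedged sketch of that parent/subflow bookkeeping, with illustrative demo_* names that are not the driver's:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_l2_parent {
        struct hlist_head subflows;     /* hash-bound subflows for this L2 rule */
};

struct demo_subflow {
        struct hlist_node node;         /* linkage into parent->subflows */
        u32 hash;                       /* PPE slot the subflow is bound to */
};

/* Record a new hardware binding under its L2 parent (parent assumed
 * zero-initialised, so the hlist head starts out empty).
 */
static struct demo_subflow *demo_bind_subflow(struct demo_l2_parent *parent,
                                              u32 hash)
{
        struct demo_subflow *sf = kzalloc(sizeof(*sf), GFP_ATOMIC);

        if (!sf)
                return NULL;

        sf->hash = hash;
        hlist_add_head(&sf->node, &parent->subflows);
        return sf;
}

/* Dropping the parent releases every binding that shares its L2 info. */
static void demo_release_parent(struct demo_l2_parent *parent)
{
        struct demo_subflow *sf;
        struct hlist_node *n;

        hlist_for_each_entry_safe(sf, n, &parent->subflows, node) {
                hlist_del_init(&sf->node);
                kfree(sf);
        }
}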
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -694,7 +694,7 @@ static int airoha_qdma_rx_process(struct
-               reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
-               if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
--                      airoha_ppe_check_skb(eth->ppe, hash);
-+                      airoha_ppe_check_skb(eth->ppe, q->skb, hash);
-               done++;
-               napi_gro_receive(&q->napi, q->skb);
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -431,10 +431,14 @@ enum airoha_flow_entry_type {
- struct airoha_flow_table_entry {
-       union {
-               struct hlist_node list; /* PPE L3 flow entry */
--              struct rhash_head l2_node; /* L2 flow entry */
-+              struct {
-+                      struct rhash_head l2_node;  /* L2 flow entry */
-+                      struct hlist_head l2_flows; /* PPE L2 subflows list */
-+              };
-       };
-       struct airoha_foe_entry data;
-+      struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
-       u32 hash;
-       enum airoha_flow_entry_type type;
-@@ -548,7 +552,8 @@ u32 airoha_rmw(void __iomem *base, u32 o
- bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
-                             struct airoha_gdm_port *port);
--void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
-+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
-+                        u16 hash);
- int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                                void *cb_priv);
- int airoha_ppe_init(struct airoha_eth *eth);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -204,6 +204,15 @@ static int airoha_get_dsa_port(struct ne
- #endif
- }
-+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
-+                                          struct ethhdr *eh)
-+{
-+      br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
-+      br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
-+      br->src_mac_hi = get_unaligned_be16(eh->h_source);
-+      br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
-+}
-+
- static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
-                                       struct airoha_foe_entry *hwe,
-                                       struct net_device *dev, int type,
-@@ -254,13 +263,7 @@ static int airoha_ppe_foe_entry_prepare(
-       qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
-       if (type == PPE_PKT_TYPE_BRIDGE) {
--              hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
--              hwe->bridge.dest_mac_lo =
--                      get_unaligned_be16(data->eth.h_dest + 4);
--              hwe->bridge.src_mac_hi =
--                      get_unaligned_be16(data->eth.h_source);
--              hwe->bridge.src_mac_lo =
--                      get_unaligned_be32(data->eth.h_source + 2);
-+              airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
-               hwe->bridge.data = qdata;
-               hwe->bridge.ib2 = val;
-               l2 = &hwe->bridge.l2.common;
-@@ -385,6 +388,19 @@ static u32 airoha_ppe_foe_get_entry_hash
-               hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
-               hv3 ^= hwe->ipv6.src_ip[0];
-               break;
-+      case PPE_PKT_TYPE_BRIDGE: {
-+              struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
-+
-+              hv1 = l2->common.src_mac_hi & 0xffff;
-+              hv1 = hv1 << 16 | l2->src_mac_lo;
-+
-+              hv2 = l2->common.dest_mac_lo;
-+              hv2 = hv2 << 16;
-+              hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
-+
-+              hv3 = l2->common.dest_mac_hi;
-+              break;
-+      }
-       case PPE_PKT_TYPE_IPV4_DSLITE:
-       case PPE_PKT_TYPE_IPV6_6RD:
-       default:
-@@ -496,15 +512,24 @@ static void airoha_ppe_foe_remove_flow(s
-               airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
-               e->hash = 0xffff;
-       }
-+      if (e->type == FLOW_TYPE_L2_SUBFLOW) {
-+              hlist_del_init(&e->l2_subflow_node);
-+              kfree(e);
-+      }
- }
- static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
-                                         struct airoha_flow_table_entry *e)
- {
-+      struct hlist_head *head = &e->l2_flows;
-+      struct hlist_node *n;
-+
-       lockdep_assert_held(&ppe_lock);
-       rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
-                              airoha_l2_flow_table_params);
-+      hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
-+              airoha_ppe_foe_remove_flow(ppe, e);
- }
- static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
-@@ -520,10 +545,56 @@ static void airoha_ppe_foe_flow_remove_e
-       spin_unlock_bh(&ppe_lock);
- }
--static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
-+static int
-+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
-+                                  struct airoha_flow_table_entry *e,
-+                                  u32 hash)
-+{
-+      u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
-+      struct airoha_foe_entry *hwe_p, hwe;
-+      struct airoha_flow_table_entry *f;
-+      struct airoha_foe_mac_info *l2;
-+      int type;
-+
-+      hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
-+      if (!hwe_p)
-+              return -EINVAL;
-+
-+      f = kzalloc(sizeof(*f), GFP_ATOMIC);
-+      if (!f)
-+              return -ENOMEM;
-+
-+      hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
-+      f->type = FLOW_TYPE_L2_SUBFLOW;
-+      f->hash = hash;
-+
-+      memcpy(&hwe, hwe_p, sizeof(*hwe_p));
-+      hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
-+      l2 = &hwe.bridge.l2;
-+      memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
-+
-+      type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
-+      if (type == PPE_PKT_TYPE_IPV4_HNAPT)
-+              memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
-+                     sizeof(hwe.ipv4.new_tuple));
-+      else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
-+               l2->common.etype == ETH_P_IP)
-+              l2->common.etype = ETH_P_IPV6;
-+
-+      hwe.bridge.ib2 = e->data.bridge.ib2;
-+      airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
-+
-+      return 0;
-+}
-+
-+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
-+                                      struct sk_buff *skb,
-+                                      u32 hash)
- {
-       struct airoha_flow_table_entry *e;
-+      struct airoha_foe_bridge br = {};
-       struct airoha_foe_entry *hwe;
-+      bool commit_done = false;
-       struct hlist_node *n;
-       u32 index, state;
-@@ -539,12 +610,33 @@ static void airoha_ppe_foe_insert_entry(
-       index = airoha_ppe_foe_get_entry_hash(hwe);
-       hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
--              if (airoha_ppe_foe_compare_entry(e, hwe)) {
--                      airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
--                      e->hash = hash;
--                      break;
-+              if (e->type == FLOW_TYPE_L2_SUBFLOW) {
-+                      state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
-+                      if (state != AIROHA_FOE_STATE_BIND) {
-+                              e->hash = 0xffff;
-+                              airoha_ppe_foe_remove_flow(ppe, e);
-+                      }
-+                      continue;
-+              }
-+
-+              if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
-+                      e->hash = 0xffff;
-+                      continue;
-               }
-+
-+              airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
-+              commit_done = true;
-+              e->hash = hash;
-       }
-+
-+      if (commit_done)
-+              goto unlock;
-+
-+      airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
-+      e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
-+                                 airoha_l2_flow_table_params);
-+      if (e)
-+              airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
- unlock:
-       spin_unlock_bh(&ppe_lock);
- }
-@@ -899,7 +991,8 @@ int airoha_ppe_setup_tc_block_cb(enum tc
-       return err;
- }
--void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
-+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
-+                        u16 hash)
- {
-       u16 now, diff;
-@@ -912,7 +1005,7 @@ void airoha_ppe_check_skb(struct airoha_
-               return;
-       ppe->foe_check_time[hash] = now;
--      airoha_ppe_foe_insert_entry(ppe, hash);
-+      airoha_ppe_foe_insert_entry(ppe, skb, hash);
- }
- int airoha_ppe_init(struct airoha_eth *eth)
diff --git a/target/linux/airoha/patches-6.6/069-v6.16-net-airoha-Add-matchall-filter-offload-support.patch b/target/linux/airoha/patches-6.6/069-v6.16-net-airoha-Add-matchall-filter-offload-support.patch
deleted file mode 100644 (file)
index ba2a2bf..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-From df8398fb7bb7a0e509200af56b79343aa133b7d6 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 15 Apr 2025 09:14:34 +0200
-Subject: [PATCH] net: airoha: Add matchall filter offload support
-
-Introduce tc matchall filter offload support in airoha_eth driver.
-Matchall hw filter is used to implement hw rate policing via tc action
-police:
-
-$tc qdisc add dev eth0 handle ffff: ingress
-$tc filter add dev eth0 parent ffff: matchall action police \
- rate 100mbit burst 1000k drop
-
-The current implementation supports just drop/accept as exceed/notexceed
-actions. Moreover, rate and burst are the only supported configuration
-parameters.
-
-Reviewed-by: Davide Caratti <dcaratti@redhat.com>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250415-airoha-hw-rx-ratelimit-v4-1-03458784fbc3@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 273 +++++++++++++++++++++-
- drivers/net/ethernet/airoha/airoha_eth.h  |   8 +-
- drivers/net/ethernet/airoha/airoha_ppe.c  |   9 +-
- drivers/net/ethernet/airoha/airoha_regs.h |   7 +
- 4 files changed, 286 insertions(+), 11 deletions(-)
-
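The police action carries either a packet rate or a byte rate, and the handler added below programs the meter in packets or in Kbit/s accordingly. A small sketch of just that unit conversion; the function name is illustrative, while the flow_action_entry fields are the ones the patch actually reads.

#include <linux/math64.h>
#include <net/flow_offload.h>

/* Translate a validated FLOW_ACTION_POLICE entry into meter parameters:
 * packet mode uses packets/s and a packet bucket, byte mode is programmed
 * in Kbit/s with a byte bucket.
 */
static void demo_police_to_meter(const struct flow_action_entry *act,
                                 u32 *rate, u32 *bucket, bool *pkt_mode)
{
        if (act->police.rate_pkt_ps) {
                *pkt_mode = true;
                *rate = act->police.rate_pkt_ps;        /* packets/s */
                *bucket = act->police.burst_pkt;        /* packets */
        } else {
                *pkt_mode = false;
                /* bytes/s -> Kbit/s: divide by 1000, multiply by 8 */
                *rate = div_u64(act->police.rate_bytes_ps, 1000) << 3;
                *bucket = act->police.burst;            /* bytes */
        }
}

For the tc example in the commit message, rate 100mbit arrives as 12500000 bytes/s and is programmed as 100000 Kbit/s, with the burst passed through as the bucket size in bytes.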
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -527,6 +527,25 @@ static int airoha_fe_init(struct airoha_
-       /* disable IFC by default */
-       airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
-+      airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
-+                   FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
-+                   FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
-+      airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
-+                   FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
-+                   FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
-+
-       /* enable 1:N vlan action, init vlan table */
-       airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
-@@ -1632,7 +1651,6 @@ static void airhoha_set_gdm2_loopback(st
-       if (port->id == 3) {
-               /* FIXME: handle XSI_PCE1_PORT */
--              airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),  0x5500);
-               airoha_fe_rmw(eth, REG_FE_WAN_PORT,
-                             WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
-                             FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
-@@ -2107,6 +2125,125 @@ static int airoha_tc_setup_qdisc_ets(str
-       }
- }
-+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
-+                                  u32 addr, enum trtcm_param_type param,
-+                                  u32 *val_low, u32 *val_high)
-+{
-+      u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
-+      u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
-+                        FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
-+                        FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+      if (read_poll_timeout(airoha_qdma_rr, val,
-+                            val & RATE_LIMIT_PARAM_RW_DONE_MASK,
-+                            USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
-+                            REG_TRTCM_CFG_PARAM(addr)))
-+              return -ETIMEDOUT;
-+
-+      *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
-+      if (val_high)
-+              *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
-+
-+      return 0;
-+}
-+
-+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
-+                                  u32 addr, enum trtcm_param_type param,
-+                                  u32 val)
-+{
-+      u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
-+      u32 config = RATE_LIMIT_PARAM_RW_MASK |
-+                   FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
-+                   FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
-+                   FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
-+
-+      airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
-+      airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-+
-+      return read_poll_timeout(airoha_qdma_rr, val,
-+                               val & RATE_LIMIT_PARAM_RW_DONE_MASK,
-+                               USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-+                               qdma, REG_TRTCM_CFG_PARAM(addr));
-+}
-+
-+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
-+                                   u32 addr, bool enable, u32 enable_mask)
-+{
-+      u32 val;
-+      int err;
-+
-+      err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
-+                                     &val, NULL);
-+      if (err)
-+              return err;
-+
-+      val = enable ? val | enable_mask : val & ~enable_mask;
-+
-+      return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
-+                                      val);
-+}
-+
-+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
-+                                         int queue_id, u32 rate_val,
-+                                         u32 bucket_size)
-+{
-+      u32 val, config, tick, unit, rate, rate_frac;
-+      int err;
-+
-+      err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
-+                                     TRTCM_MISC_MODE, &config, NULL);
-+      if (err)
-+              return err;
-+
-+      val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
-+      tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
-+      if (config & TRTCM_TICK_SEL)
-+              tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
-+      if (!tick)
-+              return -EINVAL;
-+
-+      unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
-+      if (!unit)
-+              return -EINVAL;
-+
-+      rate = rate_val / unit;
-+      rate_frac = rate_val % unit;
-+      rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
-+      rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
-+             FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
-+
-+      err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
-+                                     TRTCM_TOKEN_RATE_MODE, rate);
-+      if (err)
-+              return err;
-+
-+      val = bucket_size;
-+      if (!(config & TRTCM_PKT_MODE))
-+              val = max_t(u32, val, MIN_TOKEN_SIZE);
-+      val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
-+
-+      return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
-+                                      TRTCM_BUCKETSIZE_SHIFT_MODE, val);
-+}
-+
-+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
-+                                    bool enable, enum trtcm_unit_type unit)
-+{
-+      bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
-+      enum trtcm_param mode = TRTCM_METER_MODE;
-+      int err;
-+
-+      mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
-+      err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
-+                                      enable, mode);
-+      if (err)
-+              return err;
-+
-+      return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
-+                                       tick_sel, TRTCM_TICK_SEL);
-+}
-+
- static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
-                                      u32 addr, enum trtcm_param_type param,
-                                      enum trtcm_mode_type mode,
-@@ -2271,10 +2408,142 @@ static int airoha_tc_htb_alloc_leaf_queu
-       return 0;
- }
-+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
-+                                  u32 rate, u32 bucket_size,
-+                                  enum trtcm_unit_type unit_type)
-+{
-+      struct airoha_qdma *qdma = port->qdma;
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              int err;
-+
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
-+              if (err)
-+                      return err;
-+
-+              err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
-+                                                    bucket_size);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
-+{
-+      const struct flow_action *actions = &f->rule->action;
-+      const struct flow_action_entry *act;
-+
-+      if (!flow_action_has_entries(actions)) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "filter run with no actions");
-+              return -EINVAL;
-+      }
-+
-+      if (!flow_offload_has_one_action(actions)) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "only once action per filter is supported");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      act = &actions->entries[0];
-+      if (act->id != FLOW_ACTION_POLICE) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "invalid exceed action id");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "invalid notexceed action id");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
-+          !flow_action_is_last_entry(actions, act)) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "action accept must be last");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      if (act->police.peakrate_bytes_ps || act->police.avrate ||
-+          act->police.overhead || act->police.mtu) {
-+              NL_SET_ERR_MSG_MOD(f->common.extack,
-+                                 "peakrate/avrate/overhead/mtu unsupported");
-+              return -EOPNOTSUPP;
-+      }
-+
-+      return 0;
-+}
-+
-+static int airoha_dev_tc_matchall(struct net_device *dev,
-+                                struct tc_cls_matchall_offload *f)
-+{
-+      enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      u32 rate = 0, bucket_size = 0;
-+
-+      switch (f->command) {
-+      case TC_CLSMATCHALL_REPLACE: {
-+              const struct flow_action_entry *act;
-+              int err;
-+
-+              err = airoha_tc_matchall_act_validate(f);
-+              if (err)
-+                      return err;
-+
-+              act = &f->rule->action.entries[0];
-+              if (act->police.rate_pkt_ps) {
-+                      rate = act->police.rate_pkt_ps;
-+                      bucket_size = act->police.burst_pkt;
-+                      unit_type = TRTCM_PACKET_UNIT;
-+              } else {
-+                      rate = div_u64(act->police.rate_bytes_ps, 1000);
-+                      rate = rate << 3; /* Kbps */
-+                      bucket_size = act->police.burst;
-+              }
-+              fallthrough;
-+      }
-+      case TC_CLSMATCHALL_DESTROY:
-+              return airoha_qdma_set_rx_meter(port, rate, bucket_size,
-+                                              unit_type);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
-+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
-+                                      void *type_data, void *cb_priv)
-+{
-+      struct net_device *dev = cb_priv;
-+
-+      if (!tc_can_offload(dev))
-+              return -EOPNOTSUPP;
-+
-+      switch (type) {
-+      case TC_SETUP_CLSFLOWER:
-+              return airoha_ppe_setup_tc_block_cb(dev, type_data);
-+      case TC_SETUP_CLSMATCHALL:
-+              return airoha_dev_tc_matchall(dev, type_data);
-+      default:
-+              return -EOPNOTSUPP;
-+      }
-+}
-+
- static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
-                                    struct flow_block_offload *f)
- {
--      flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
-+      flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
-       static LIST_HEAD(block_cb_list);
-       struct flow_block_cb *block_cb;
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -127,6 +127,11 @@ enum tx_sched_mode {
-       TC_SCH_WRR2,
- };
-+enum trtcm_unit_type {
-+      TRTCM_BYTE_UNIT,
-+      TRTCM_PACKET_UNIT,
-+};
-+
- enum trtcm_param_type {
-       TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
-       TRTCM_TOKEN_RATE_MODE,
-@@ -554,8 +559,7 @@ bool airoha_is_valid_gdm_port(struct air
- void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
-                         u16 hash);
--int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
--                               void *cb_priv);
-+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
- int airoha_ppe_init(struct airoha_eth *eth);
- void airoha_ppe_deinit(struct airoha_eth *eth);
- struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -967,18 +967,13 @@ error_npu_put:
-       return err;
- }
--int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
--                               void *cb_priv)
-+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
- {
--      struct flow_cls_offload *cls = type_data;
--      struct net_device *dev = cb_priv;
-       struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct flow_cls_offload *cls = type_data;
-       struct airoha_eth *eth = port->qdma->eth;
-       int err = 0;
--      if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
--              return -EOPNOTSUPP;
--
-       mutex_lock(&flow_offload_mutex);
-       if (!eth->npu)
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -283,6 +283,7 @@
- #define PPE_HASH_SEED                         0x12345678
- #define REG_PPE_DFT_CPORT0(_n)                        (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
-+#define DFT_CPORT_MASK(_n)                    GENMASK(3 + ((_n) << 2), ((_n) << 2))
- #define REG_PPE_DFT_CPORT1(_n)                        (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
-@@ -691,6 +692,12 @@
- #define REG_TRTCM_DATA_LOW(_n)                ((_n) + 0x8)
- #define REG_TRTCM_DATA_HIGH(_n)               ((_n) + 0xc)
-+#define RATE_LIMIT_PARAM_RW_MASK      BIT(31)
-+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
-+#define RATE_LIMIT_PARAM_TYPE_MASK    GENMASK(29, 28)
-+#define RATE_LIMIT_METER_GROUP_MASK   GENMASK(27, 26)
-+#define RATE_LIMIT_PARAM_INDEX_MASK   GENMASK(23, 16)
-+
- #define REG_TXWRR_MODE_CFG            0x1020
- #define TWRR_WEIGHT_SCALE_MASK                BIT(31)
- #define TWRR_WEIGHT_BASE_MASK         BIT(3)
diff --git a/target/linux/airoha/patches-6.6/070-01-v6.16-net-airoha-Introduce-airoha_irq_bank-struct.patch b/target/linux/airoha/patches-6.6/070-01-v6.16-net-airoha-Introduce-airoha_irq_bank-struct.patch
deleted file mode 100644 (file)
index dd5dc16..0000000
+++ /dev/null
@@ -1,292 +0,0 @@
-From 9439db26d3ee4a897e5cd108864172531f31ce07 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 18 Apr 2025 12:40:49 +0200
-Subject: [PATCH 1/2] net: airoha: Introduce airoha_irq_bank struct
-
-EN7581 ethernet SoC supports 4 programmable IRQ lines each one composed
-by 4 IRQ configuration registers. Add airoha_irq_bank struct as a
-container for independent IRQ lines info (e.g. IRQ number, enabled source
-interrupts, ecc). This is a preliminary patch to support multiple IRQ lines
-in airoha_eth driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250418-airoha-eth-multi-irq-v1-1-1ab0083ca3c1@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c  | 106 ++++++++++++++--------
- drivers/net/ethernet/airoha/airoha_eth.h  |  13 ++-
- drivers/net/ethernet/airoha/airoha_regs.h |  11 ++-
- 3 files changed, 86 insertions(+), 44 deletions(-)
-
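Each IRQ bank below carries its own spinlock, cached enable mask and per-bank register; the essential pattern is a mask update under the lock followed by a register read-back so the posted MMIO write completes inside the critical section. A hedged sketch of that pattern with illustrative names, not the driver's:

#include <linux/io.h>
#include <linux/spinlock.h>

struct demo_irq_bank {
        spinlock_t lock;        /* serialises mask updates for this bank */
        u32 mask;               /* cached enable bits */
        void __iomem *reg;      /* this bank's INT_ENABLE register */
};

static void demo_bank_update_mask(struct demo_irq_bank *bank, u32 clear, u32 set)
{
        unsigned long flags;

        spin_lock_irqsave(&bank->lock, flags);

        bank->mask &= ~clear;
        bank->mask |= set;
        writel(bank->mask, bank->reg);
        /* read back so the posted write completes inside the critical section */
        readl(bank->reg);

        spin_unlock_irqrestore(&bank->lock, flags);
}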
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -34,37 +34,40 @@ u32 airoha_rmw(void __iomem *base, u32 o
-       return val;
- }
--static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
--                                  u32 clear, u32 set)
-+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
-+                                  int index, u32 clear, u32 set)
- {
-+      struct airoha_qdma *qdma = irq_bank->qdma;
-+      int bank = irq_bank - &qdma->irq_banks[0];
-       unsigned long flags;
--      if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
-+      if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
-               return;
--      spin_lock_irqsave(&qdma->irq_lock, flags);
-+      spin_lock_irqsave(&irq_bank->irq_lock, flags);
--      qdma->irqmask[index] &= ~clear;
--      qdma->irqmask[index] |= set;
--      airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
-+      irq_bank->irqmask[index] &= ~clear;
-+      irq_bank->irqmask[index] |= set;
-+      airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
-+                     irq_bank->irqmask[index]);
-       /* Read irq_enable register in order to guarantee the update above
-        * completes in the spinlock critical section.
-        */
--      airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
-+      airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
--      spin_unlock_irqrestore(&qdma->irq_lock, flags);
-+      spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
- }
--static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
--                                 u32 mask)
-+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
-+                                 int index, u32 mask)
- {
--      airoha_qdma_set_irqmask(qdma, index, 0, mask);
-+      airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
- }
--static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
--                                  u32 mask)
-+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
-+                                  int index, u32 mask)
- {
--      airoha_qdma_set_irqmask(qdma, index, mask, 0);
-+      airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
- }
- static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-@@ -732,6 +735,7 @@ free_frag:
- static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-+      struct airoha_irq_bank *irq_bank = &q->qdma->irq_banks[0];
-       int cur, done = 0;
-       do {
-@@ -740,7 +744,7 @@ static int airoha_qdma_rx_napi_poll(stru
-       } while (cur && done < budget);
-       if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
-+              airoha_qdma_irq_enable(irq_bank, QDMA_INT_REG_IDX1,
-                                      RX_DONE_INT_MASK);
-       return done;
-@@ -945,7 +949,7 @@ unlock:
-       }
-       if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
-+              airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
-                                      TX_DONE_INT_MASK(id));
-       return done;
-@@ -1176,13 +1180,16 @@ static int airoha_qdma_hw_init(struct ai
-       int i;
-       /* clear pending irqs */
--      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
-+      for (i = 0; i < ARRAY_SIZE(qdma->irq_banks[0].irqmask); i++)
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-       /* setup irqs */
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
--      airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-+      airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
-+                             INT_IDX0_MASK);
-+      airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX1,
-+                             INT_IDX1_MASK);
-+      airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
-+                             INT_IDX4_MASK);
-       /* setup irq binding */
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-@@ -1227,13 +1234,14 @@ static int airoha_qdma_hw_init(struct ai
- static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
- {
--      struct airoha_qdma *qdma = dev_instance;
--      u32 intr[ARRAY_SIZE(qdma->irqmask)];
-+      struct airoha_irq_bank *irq_bank = dev_instance;
-+      struct airoha_qdma *qdma = irq_bank->qdma;
-+      u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
-       int i;
--      for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
-+      for (i = 0; i < ARRAY_SIZE(intr); i++) {
-               intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
--              intr[i] &= qdma->irqmask[i];
-+              intr[i] &= irq_bank->irqmask[i];
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-       }
-@@ -1241,7 +1249,7 @@ static irqreturn_t airoha_irq_handler(in
-               return IRQ_NONE;
-       if (intr[1] & RX_DONE_INT_MASK) {
--              airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
-+              airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1,
-                                       RX_DONE_INT_MASK);
-               for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-@@ -1258,7 +1266,7 @@ static irqreturn_t airoha_irq_handler(in
-                       if (!(intr[0] & TX_DONE_INT_MASK(i)))
-                               continue;
--                      airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
-+                      airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
-                                               TX_DONE_INT_MASK(i));
-                       napi_schedule(&qdma->q_tx_irq[i].napi);
-               }
-@@ -1267,6 +1275,39 @@ static irqreturn_t airoha_irq_handler(in
-       return IRQ_HANDLED;
- }
-+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
-+                                    struct airoha_qdma *qdma)
-+{
-+      struct airoha_eth *eth = qdma->eth;
-+      int i, id = qdma - &eth->qdma[0];
-+
-+      for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
-+              struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
-+              int err, irq_index = 4 * id + i;
-+              const char *name;
-+
-+              spin_lock_init(&irq_bank->irq_lock);
-+              irq_bank->qdma = qdma;
-+
-+              irq_bank->irq = platform_get_irq(pdev, irq_index);
-+              if (irq_bank->irq < 0)
-+                      return irq_bank->irq;
-+
-+              name = devm_kasprintf(eth->dev, GFP_KERNEL,
-+                                    KBUILD_MODNAME ".%d", irq_index);
-+              if (!name)
-+                      return -ENOMEM;
-+
-+              err = devm_request_irq(eth->dev, irq_bank->irq,
-+                                     airoha_irq_handler, IRQF_SHARED, name,
-+                                     irq_bank);
-+              if (err)
-+                      return err;
-+      }
-+
-+      return 0;
-+}
-+
- static int airoha_qdma_init(struct platform_device *pdev,
-                           struct airoha_eth *eth,
-                           struct airoha_qdma *qdma)
-@@ -1274,9 +1315,7 @@ static int airoha_qdma_init(struct platf
-       int err, id = qdma - &eth->qdma[0];
-       const char *res;
--      spin_lock_init(&qdma->irq_lock);
-       qdma->eth = eth;
--
-       res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
-       if (!res)
-               return -ENOMEM;
-@@ -1286,12 +1325,7 @@ static int airoha_qdma_init(struct platf
-               return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
-                                    "failed to iomap qdma%d regs\n", id);
--      qdma->irq = platform_get_irq(pdev, 4 * id);
--      if (qdma->irq < 0)
--              return qdma->irq;
--
--      err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
--                             IRQF_SHARED, KBUILD_MODNAME, qdma);
-+      err = airoha_qdma_init_irq_banks(pdev, qdma);
-       if (err)
-               return err;
-@@ -2782,7 +2816,7 @@ static int airoha_alloc_gdm_port(struct
-       dev->features |= dev->hw_features;
-       dev->vlan_features = dev->hw_features;
-       dev->dev.of_node = np;
--      dev->irq = qdma->irq;
-+      dev->irq = qdma->irq_banks[0].irq;
-       SET_NETDEV_DEV(dev, eth->dev);
-       /* reserve hw queues for HTB offloading */
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -17,6 +17,7 @@
- #define AIROHA_MAX_NUM_GDM_PORTS      4
- #define AIROHA_MAX_NUM_QDMA           2
-+#define AIROHA_MAX_NUM_IRQ_BANKS      1
- #define AIROHA_MAX_DSA_PORTS          7
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
-@@ -452,17 +453,23 @@ struct airoha_flow_table_entry {
-       unsigned long cookie;
- };
--struct airoha_qdma {
--      struct airoha_eth *eth;
--      void __iomem *regs;
-+struct airoha_irq_bank {
-+      struct airoha_qdma *qdma;
-       /* protect concurrent irqmask accesses */
-       spinlock_t irq_lock;
-       u32 irqmask[QDMA_INT_REG_MAX];
-       int irq;
-+};
-+
-+struct airoha_qdma {
-+      struct airoha_eth *eth;
-+      void __iomem *regs;
-       atomic_t users;
-+      struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
-+
-       struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-       struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -423,11 +423,12 @@
-        ((_n) == 2) ? 0x0720 :         \
-        ((_n) == 1) ? 0x0024 : 0x0020)
--#define REG_INT_ENABLE(_n)            \
--      (((_n) == 4) ? 0x0750 :         \
--       ((_n) == 3) ? 0x0744 :         \
--       ((_n) == 2) ? 0x0740 :         \
--       ((_n) == 1) ? 0x002c : 0x0028)
-+#define REG_INT_ENABLE(_b, _n)                \
-+      (((_n) == 4) ? 0x0750 + ((_b) << 5) :   \
-+       ((_n) == 3) ? 0x0744 + ((_b) << 5) :   \
-+       ((_n) == 2) ? 0x0740 + ((_b) << 5) :   \
-+       ((_n) == 1) ? 0x002c + ((_b) << 3) :   \
-+                     0x0028 + ((_b) << 3))
- /* QDMA_CSR_INT_ENABLE1 */
- #define RX15_COHERENT_INT_MASK                BIT(31)
diff --git a/target/linux/airoha/patches-6.6/070-02-v6.16-net-airoha-Enable-multiple-IRQ-lines-support-in-airo.patch b/target/linux/airoha/patches-6.6/070-02-v6.16-net-airoha-Enable-multiple-IRQ-lines-support-in-airo.patch
deleted file mode 100644 (file)
index db4494e..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-From f252493e1835366fc25ce631c3056f900977dd11 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 18 Apr 2025 12:40:50 +0200
-Subject: [PATCH 2/2] net: airoha: Enable multiple IRQ lines support in
- airoha_eth driver.
-
-EN7581 ethernet SoC supports 4 programmable IRQ lines for Tx and Rx
-interrupts. Enable multiple IRQ lines support. Map Rx/Tx queues to the
-available IRQ lines using the default scheme used in the vendor SDK:
-
-- IRQ0: rx queues [0-4],[7-9],15
-- IRQ1: rx queues [21-30]
-- IRQ2: rx queues 5
-- IRQ3: rx queues 6
-
-Tx queues interrupts are managed by IRQ0.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250418-airoha-eth-multi-irq-v1-2-1ab0083ca3c1@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c  |  67 +++++---
- drivers/net/ethernet/airoha/airoha_eth.h  |  13 +-
- drivers/net/ethernet/airoha/airoha_regs.h | 185 +++++++++++++++++-----
- 3 files changed, 206 insertions(+), 59 deletions(-)
-
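The per-bank pin masks introduced below encode exactly the scheme in the commit message (0x839f covers queues 0-4, 7-9 and 15; 0x7fe00000 covers queues 21-30; BIT(5) and BIT(6) cover queues 5 and 6). A small sketch, with illustrative demo_* names, of how a given rx queue maps to an IRQ bank and to the low/high RX-done status word:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_RX_BANK0_PINS      0x839fU         /* rx queues 0-4, 7-9, 15 */
#define DEMO_RX_BANK1_PINS      0x7fe00000U     /* rx queues 21-30 */
#define DEMO_RX_BANK2_PINS      BIT(5)          /* rx queue 5 */
#define DEMO_RX_BANK3_PINS      BIT(6)          /* rx queue 6 */

static const u32 demo_rx_bank_pins[4] = {
        DEMO_RX_BANK0_PINS, DEMO_RX_BANK1_PINS,
        DEMO_RX_BANK2_PINS, DEMO_RX_BANK3_PINS,
};

/* Which IRQ bank signals completions for rx queue qid (0..31)? */
static int demo_rx_queue_bank(unsigned int qid)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (demo_rx_bank_pins[i] & BIT(qid))
                        return i;
        }

        return -1;      /* queue not routed to any bank */
}

/* Queues 0..15 are reported in the low RX-done word, 16..31 in the high one. */
static bool demo_rx_queue_uses_high_word(unsigned int qid)
{
        return qid >= 16;
}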
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -735,7 +735,6 @@ free_frag:
- static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
- {
-       struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
--      struct airoha_irq_bank *irq_bank = &q->qdma->irq_banks[0];
-       int cur, done = 0;
-       do {
-@@ -743,9 +742,20 @@ static int airoha_qdma_rx_napi_poll(stru
-               done += cur;
-       } while (cur && done < budget);
--      if (done < budget && napi_complete(napi))
--              airoha_qdma_irq_enable(irq_bank, QDMA_INT_REG_IDX1,
--                                     RX_DONE_INT_MASK);
-+      if (done < budget && napi_complete(napi)) {
-+              struct airoha_qdma *qdma = q->qdma;
-+              int i, qid = q - &qdma->q_rx[0];
-+              int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
-+                                                       : QDMA_INT_REG_IDX2;
-+
-+              for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
-+                      if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
-+                              continue;
-+
-+                      airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
-+                                             BIT(qid % RX_DONE_HIGH_OFFSET));
-+              }
-+      }
-       return done;
- }
-@@ -1179,17 +1189,24 @@ static int airoha_qdma_hw_init(struct ai
- {
-       int i;
--      /* clear pending irqs */
--      for (i = 0; i < ARRAY_SIZE(qdma->irq_banks[0].irqmask); i++)
-+      for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
-+              /* clear pending irqs */
-               airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
--
--      /* setup irqs */
-+              /* setup rx irqs */
-+              airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
-+                                     INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
-+              airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
-+                                     INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
-+              airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
-+                                     INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
-+              airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
-+                                     INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
-+      }
-+      /* setup tx irqs */
-       airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
--                             INT_IDX0_MASK);
--      airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX1,
--                             INT_IDX1_MASK);
-+                             TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
-       airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
--                             INT_IDX4_MASK);
-+                             TX_COHERENT_HIGH_INT_MASK);
-       /* setup irq binding */
-       for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-@@ -1236,6 +1253,7 @@ static irqreturn_t airoha_irq_handler(in
- {
-       struct airoha_irq_bank *irq_bank = dev_instance;
-       struct airoha_qdma *qdma = irq_bank->qdma;
-+      u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
-       u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
-       int i;
-@@ -1248,17 +1266,24 @@ static irqreturn_t airoha_irq_handler(in
-       if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
-               return IRQ_NONE;
--      if (intr[1] & RX_DONE_INT_MASK) {
--              airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1,
--                                      RX_DONE_INT_MASK);
-+      rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
-+      if (rx_intr1) {
-+              airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
-+              rx_intr_mask |= rx_intr1;
-+      }
--              for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
--                      if (!qdma->q_rx[i].ndesc)
--                              continue;
-+      rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
-+      if (rx_intr2) {
-+              airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
-+              rx_intr_mask |= (rx_intr2 << 16);
-+      }
--                      if (intr[1] & BIT(i))
--                              napi_schedule(&qdma->q_rx[i].napi);
--              }
-+      for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
-+              if (!qdma->q_rx[i].ndesc)
-+                      continue;
-+
-+              if (rx_intr_mask & BIT(i))
-+                      napi_schedule(&qdma->q_rx[i].napi);
-       }
-       if (intr[0] & INT_TX_MASK) {
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -17,7 +17,7 @@
- #define AIROHA_MAX_NUM_GDM_PORTS      4
- #define AIROHA_MAX_NUM_QDMA           2
--#define AIROHA_MAX_NUM_IRQ_BANKS      1
-+#define AIROHA_MAX_NUM_IRQ_BANKS      4
- #define AIROHA_MAX_DSA_PORTS          7
- #define AIROHA_MAX_NUM_RSTS           3
- #define AIROHA_MAX_NUM_XSI_RSTS               5
-@@ -453,6 +453,17 @@ struct airoha_flow_table_entry {
-       unsigned long cookie;
- };
-+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
-+#define RX_IRQ0_BANK_PIN_MASK                 0x839f
-+#define RX_IRQ1_BANK_PIN_MASK                 0x7fe00000
-+#define RX_IRQ2_BANK_PIN_MASK                 0x20
-+#define RX_IRQ3_BANK_PIN_MASK                 0x40
-+#define RX_IRQ_BANK_PIN_MASK(_n)              \
-+      (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK :  \
-+       ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK :  \
-+       ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK :  \
-+       RX_IRQ0_BANK_PIN_MASK)
-+
- struct airoha_irq_bank {
-       struct airoha_qdma *qdma;
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -463,6 +463,26 @@
- #define IRQ0_FULL_INT_MASK            BIT(1)
- #define IRQ0_INT_MASK                 BIT(0)
-+#define RX_COHERENT_LOW_INT_MASK                              \
-+      (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK |      \
-+       RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK |      \
-+       RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK |      \
-+       RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK |        \
-+       RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK |        \
-+       RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK |        \
-+       RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK |        \
-+       RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
-+
-+#define RX_COHERENT_LOW_OFFSET        __ffs(RX_COHERENT_LOW_INT_MASK)
-+#define INT_RX0_MASK(_n)                                      \
-+      (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
-+
-+#define TX_COHERENT_LOW_INT_MASK                              \
-+      (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK |        \
-+       TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK |        \
-+       TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK |        \
-+       TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
-+
- #define TX_DONE_INT_MASK(_n)                                  \
-       ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK              \
-             : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-@@ -471,17 +491,6 @@
-       (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |                   \
-        IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
--#define INT_IDX0_MASK                                         \
--      (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |        \
--       TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |        \
--       TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |        \
--       TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |        \
--       RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |        \
--       RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |        \
--       RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |        \
--       RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |        \
--       RX15_COHERENT_INT_MASK | INT_TX_MASK)
--
- /* QDMA_CSR_INT_ENABLE2 */
- #define RX15_NO_CPU_DSCP_INT_MASK     BIT(31)
- #define RX14_NO_CPU_DSCP_INT_MASK     BIT(30)
-@@ -516,19 +525,121 @@
- #define RX1_DONE_INT_MASK             BIT(1)
- #define RX0_DONE_INT_MASK             BIT(0)
--#define RX_DONE_INT_MASK                                      \
--      (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |                \
--       RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |                \
--       RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |                \
--       RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |                \
--       RX15_DONE_INT_MASK)
--#define INT_IDX1_MASK                                         \
--      (RX_DONE_INT_MASK |                                     \
--       RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |  \
--       RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |  \
--       RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |  \
--       RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |  \
--       RX15_NO_CPU_DSCP_INT_MASK)
-+#define RX_NO_CPU_DSCP_LOW_INT_MASK                                   \
-+      (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK |        \
-+       RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK |        \
-+       RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK |        \
-+       RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK |          \
-+       RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK |          \
-+       RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK |          \
-+       RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK |          \
-+       RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
-+
-+#define RX_DONE_LOW_INT_MASK                          \
-+      (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK |      \
-+       RX13_DONE_INT_MASK | RX12_DONE_INT_MASK |      \
-+       RX11_DONE_INT_MASK | RX10_DONE_INT_MASK |      \
-+       RX9_DONE_INT_MASK | RX8_DONE_INT_MASK |        \
-+       RX7_DONE_INT_MASK | RX6_DONE_INT_MASK |        \
-+       RX5_DONE_INT_MASK | RX4_DONE_INT_MASK |        \
-+       RX3_DONE_INT_MASK | RX2_DONE_INT_MASK |        \
-+       RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
-+
-+#define RX_NO_CPU_DSCP_LOW_OFFSET     __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
-+#define INT_RX1_MASK(_n)                                                      \
-+      ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) |  \
-+       (RX_DONE_LOW_INT_MASK & (_n)))
-+
-+/* QDMA_CSR_INT_ENABLE3 */
-+#define RX31_NO_CPU_DSCP_INT_MASK     BIT(31)
-+#define RX30_NO_CPU_DSCP_INT_MASK     BIT(30)
-+#define RX29_NO_CPU_DSCP_INT_MASK     BIT(29)
-+#define RX28_NO_CPU_DSCP_INT_MASK     BIT(28)
-+#define RX27_NO_CPU_DSCP_INT_MASK     BIT(27)
-+#define RX26_NO_CPU_DSCP_INT_MASK     BIT(26)
-+#define RX25_NO_CPU_DSCP_INT_MASK     BIT(25)
-+#define RX24_NO_CPU_DSCP_INT_MASK     BIT(24)
-+#define RX23_NO_CPU_DSCP_INT_MASK     BIT(23)
-+#define RX22_NO_CPU_DSCP_INT_MASK     BIT(22)
-+#define RX21_NO_CPU_DSCP_INT_MASK     BIT(21)
-+#define RX20_NO_CPU_DSCP_INT_MASK     BIT(20)
-+#define RX19_NO_CPU_DSCP_INT_MASK     BIT(19)
-+#define RX18_NO_CPU_DSCP_INT_MASK     BIT(18)
-+#define RX17_NO_CPU_DSCP_INT_MASK     BIT(17)
-+#define RX16_NO_CPU_DSCP_INT_MASK     BIT(16)
-+#define RX31_DONE_INT_MASK            BIT(15)
-+#define RX30_DONE_INT_MASK            BIT(14)
-+#define RX29_DONE_INT_MASK            BIT(13)
-+#define RX28_DONE_INT_MASK            BIT(12)
-+#define RX27_DONE_INT_MASK            BIT(11)
-+#define RX26_DONE_INT_MASK            BIT(10)
-+#define RX25_DONE_INT_MASK            BIT(9)
-+#define RX24_DONE_INT_MASK            BIT(8)
-+#define RX23_DONE_INT_MASK            BIT(7)
-+#define RX22_DONE_INT_MASK            BIT(6)
-+#define RX21_DONE_INT_MASK            BIT(5)
-+#define RX20_DONE_INT_MASK            BIT(4)
-+#define RX19_DONE_INT_MASK            BIT(3)
-+#define RX18_DONE_INT_MASK            BIT(2)
-+#define RX17_DONE_INT_MASK            BIT(1)
-+#define RX16_DONE_INT_MASK            BIT(0)
-+
-+#define RX_NO_CPU_DSCP_HIGH_INT_MASK                                  \
-+      (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK |        \
-+       RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK |        \
-+       RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK |        \
-+       RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK |        \
-+       RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK |        \
-+       RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK |        \
-+       RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK |        \
-+       RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
-+
-+#define RX_DONE_HIGH_INT_MASK                         \
-+      (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK |      \
-+       RX29_DONE_INT_MASK | RX28_DONE_INT_MASK |      \
-+       RX27_DONE_INT_MASK | RX26_DONE_INT_MASK |      \
-+       RX25_DONE_INT_MASK | RX24_DONE_INT_MASK |      \
-+       RX23_DONE_INT_MASK | RX22_DONE_INT_MASK |      \
-+       RX21_DONE_INT_MASK | RX20_DONE_INT_MASK |      \
-+       RX19_DONE_INT_MASK | RX18_DONE_INT_MASK |      \
-+       RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
-+
-+#define RX_DONE_INT_MASK      (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
-+#define RX_DONE_HIGH_OFFSET   fls(RX_DONE_HIGH_INT_MASK)
-+
-+#define INT_RX2_MASK(_n)                              \
-+      ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) |        \
-+       (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
-+
-+/* QDMA_CSR_INT_ENABLE4 */
-+#define RX31_COHERENT_INT_MASK                BIT(31)
-+#define RX30_COHERENT_INT_MASK                BIT(30)
-+#define RX29_COHERENT_INT_MASK                BIT(29)
-+#define RX28_COHERENT_INT_MASK                BIT(28)
-+#define RX27_COHERENT_INT_MASK                BIT(27)
-+#define RX26_COHERENT_INT_MASK                BIT(26)
-+#define RX25_COHERENT_INT_MASK                BIT(25)
-+#define RX24_COHERENT_INT_MASK                BIT(24)
-+#define RX23_COHERENT_INT_MASK                BIT(23)
-+#define RX22_COHERENT_INT_MASK                BIT(22)
-+#define RX21_COHERENT_INT_MASK                BIT(21)
-+#define RX20_COHERENT_INT_MASK                BIT(20)
-+#define RX19_COHERENT_INT_MASK                BIT(19)
-+#define RX18_COHERENT_INT_MASK                BIT(18)
-+#define RX17_COHERENT_INT_MASK                BIT(17)
-+#define RX16_COHERENT_INT_MASK                BIT(16)
-+
-+#define RX_COHERENT_HIGH_INT_MASK                             \
-+      (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK |      \
-+       RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK |      \
-+       RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK |      \
-+       RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK |      \
-+       RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK |      \
-+       RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK |      \
-+       RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK |      \
-+       RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
-+
-+#define INT_RX3_MASK(_n)      (RX_COHERENT_HIGH_INT_MASK & (_n))
- /* QDMA_CSR_INT_ENABLE5 */
- #define TX31_COHERENT_INT_MASK                BIT(31)
-@@ -556,19 +667,19 @@
- #define TX9_COHERENT_INT_MASK         BIT(9)
- #define TX8_COHERENT_INT_MASK         BIT(8)
--#define INT_IDX4_MASK                                         \
--      (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |        \
--       TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |      \
--       TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |      \
--       TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |      \
--       TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |      \
--       TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |      \
--       TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |      \
--       TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |      \
--       TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |      \
--       TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |      \
--       TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |      \
--       TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-+#define TX_COHERENT_HIGH_INT_MASK                             \
-+      (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK |      \
-+       TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK |      \
-+       TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK |      \
-+       TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK |      \
-+       TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK |      \
-+       TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK |      \
-+       TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK |      \
-+       TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK |      \
-+       TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK |      \
-+       TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK |      \
-+       TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK |      \
-+       TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
- #define REG_TX_IRQ_BASE(_n)           ((_n) ? 0x0048 : 0x0050)
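As a standalone illustration of the helpers replaced above: the old INT_IDX0/INT_IDX1 masks were fixed constants, while the new INT_RX1_MASK()/INT_RX2_MASK() macros derive the register value from a per-queue bitmap. The sketch below reimplements INT_RX1_MASK() in plain C with the mask values from this hunk; it is not driver code, just a worked example of the bit remapping.

    #include <stdint.h>
    #include <stdio.h>

    /* QDMA_CSR_INT_ENABLE2 layout: bits 0-15 = RXn_DONE, bits 16-31 = RXn_NO_CPU_DSCP */
    #define RX_DONE_LOW_INT_MASK        0x0000ffffu
    #define RX_NO_CPU_DSCP_LOW_INT_MASK 0xffff0000u
    #define RX_NO_CPU_DSCP_LOW_OFFSET   16 /* __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK) */

    /* same expression as INT_RX1_MASK(_n) in the hunk above */
    static uint32_t int_rx1_mask(uint32_t rx_queues)
    {
        return ((rx_queues << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) |
               (RX_DONE_LOW_INT_MASK & rx_queues);
    }

    int main(void)
    {
        /* enable rx queues 0 and 15: DONE bits 0/15 plus NO_CPU_DSCP bits 16/31 */
        printf("0x%08x\n", (unsigned)int_rx1_mask((1u << 0) | (1u << 15))); /* 0x80018001 */
        return 0;
    }

INT_RX2_MASK() does the mirror-image mapping for queues 16-31 on QDMA_CSR_INT_ENABLE3, shifting the queue bitmap down instead of up.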
diff --git a/target/linux/airoha/patches-6.6/071-v6.15-net-airoha-Add-missing-field-to-ppe_mbox_data-struct.patch b/target/linux/airoha/patches-6.6/071-v6.15-net-airoha-Add-missing-field-to-ppe_mbox_data-struct.patch
deleted file mode 100644 (file)
index 2fb90b6..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-From 4a7843cc8a41b9612becccc07715ed017770eb89 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 6 May 2025 18:56:47 +0200
-Subject: [PATCH] net: airoha: Add missing field to ppe_mbox_data struct
-
-The official Airoha EN7581 firmware requires adding max_packet field in
-ppe_mbox_data struct while the unofficial one used to develop the Airoha
-EN7581 flowtable support does not require this field.
-This patch does not introduce any real backward-compatibility issue since
-EN7581 fw is not publicly available in linux-firmware or other
-repositories (e.g. OpenWrt) yet and the official fw version will use this
-new layout. For this reason this change needs to be backported.
-Moreover, make explicit the padding added by the compiler introducing
-the rsv array in init_info struct.
-At the same time use u32 instead of int for init_info and set_info
-struct definitions in ppe_mbox_data struct.
-
-Fixes: 23290c7bc190d ("net: airoha: Introduce Airoha NPU support")
-Reviewed-by: Simon Horman <horms@kernel.org>
-Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250506-airoha-en7581-fix-ppe_mbox_data-v5-1-29cabed6864d@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -104,12 +104,14 @@ struct ppe_mbox_data {
-                       u8 xpon_hal_api;
-                       u8 wan_xsi;
-                       u8 ct_joyme4;
--                      int ppe_type;
--                      int wan_mode;
--                      int wan_sel;
-+                      u8 max_packet;
-+                      u8 rsv[3];
-+                      u32 ppe_type;
-+                      u32 wan_mode;
-+                      u32 wan_sel;
-               } init_info;
-               struct {
--                      int func_id;
-+                      u32 func_id;
-                       u32 size;
-                       u32 data;
-               } set_info;
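The layout idea behind this fix, sketched below with hypothetical struct and field names (this is not the driver's real ppe_mbox_data): switching the trailing fields from int to u32 and adding an explicit rsv[] byte array keeps the mailbox layout fixed and visible to the reader, instead of relying on padding the compiler inserts silently.

    #include <stdint.h>

    struct mbox_init_sketch {
        uint8_t  flag_a;        /* hypothetical leading byte fields standing in */
        uint8_t  flag_b;        /* for the real struct's u8 members             */
        uint8_t  flag_c;
        uint8_t  flag_d;
        uint8_t  max_packet;    /* new field */
        uint8_t  rsv[3];        /* explicit reserved bytes keep the u32s aligned */
        uint32_t ppe_type;
        uint32_t wan_mode;
        uint32_t wan_sel;
    };

    /* every byte is accounted for: 8 + 3*4 = 20, no hidden compiler padding */
    _Static_assert(sizeof(struct mbox_init_sketch) == 20, "unexpected padding");

Because the firmware parses this structure by offset, an explicit layout like this is what keeps driver and firmware in agreement.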
diff --git a/target/linux/airoha/patches-6.6/072-v6.15-net-airoha-Fix-page-recycling-in-airoha_qdma_rx_proc.patch b/target/linux/airoha/patches-6.6/072-v6.15-net-airoha-Fix-page-recycling-in-airoha_qdma_rx_proc.patch
deleted file mode 100644 (file)
index bcf60ce..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-From d6d2b0e1538d5c381ec0ca95afaf772c096ea5dc Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 15 May 2025 08:33:06 +0200
-Subject: [PATCH] net: airoha: Fix page recycling in airoha_qdma_rx_process()
-
-Do not recycle the page twice in airoha_qdma_rx_process routine in case
-of error. Just run dev_kfree_skb() if the skb has been allocated and marked
-for recycling. Run page_pool_put_full_page() directly if the skb has not
-been allocated yet.
-Moreover, rely on DMA address from queue entry element instead of reading
-it from the DMA descriptor for DMA syncing in airoha_qdma_rx_process().
-
-Fixes: e12182ddb6e71 ("net: airoha: Enable Rx Scatter-Gather")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250515-airoha-fix-rx-process-error-condition-v2-1-657e92c894b9@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 22 +++++++++-------------
- 1 file changed, 9 insertions(+), 13 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -636,7 +636,6 @@ static int airoha_qdma_rx_process(struct
-               struct airoha_queue_entry *e = &q->entry[q->tail];
-               struct airoha_qdma_desc *desc = &q->desc[q->tail];
-               u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
--              dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-               struct page *page = virt_to_head_page(e->buf);
-               u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-               struct airoha_gdm_port *port;
-@@ -645,22 +644,16 @@ static int airoha_qdma_rx_process(struct
-               if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
-                       break;
--              if (!dma_addr)
--                      break;
--
--              len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
--              if (!len)
--                      break;
--
-               q->tail = (q->tail + 1) % q->ndesc;
-               q->queued--;
--              dma_sync_single_for_cpu(eth->dev, dma_addr,
-+              dma_sync_single_for_cpu(eth->dev, e->dma_addr,
-                                       SKB_WITH_OVERHEAD(q->buf_size), dir);
-+              len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-               data_len = q->skb ? q->buf_size
-                                 : SKB_WITH_OVERHEAD(q->buf_size);
--              if (data_len < len)
-+              if (!len || data_len < len)
-                       goto free_frag;
-               p = airoha_qdma_get_gdm_port(eth, desc);
-@@ -723,9 +716,12 @@ static int airoha_qdma_rx_process(struct
-               q->skb = NULL;
-               continue;
- free_frag:
--              page_pool_put_full_page(q->page_pool, page, true);
--              dev_kfree_skb(q->skb);
--              q->skb = NULL;
-+              if (q->skb) {
-+                      dev_kfree_skb(q->skb);
-+                      q->skb = NULL;
-+              } else {
-+                      page_pool_put_full_page(q->page_pool, page, true);
-+              }
-       }
-       airoha_qdma_fill_rx_queue(q);
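The rule this fix establishes is that each RX buffer must be released exactly once: through the skb when one has already been built (the skb was marked for recycling, so freeing it already returns its pages to the pool), and straight back to the page pool otherwise. Calling both, as the old code did, put the page back twice. A minimal sketch of that rule follows; struct airoha_queue is the driver's own type and this is not a drop-in function.

    #include <linux/skbuff.h>
    #include <net/page_pool/helpers.h>

    static void rx_drop_frag_sketch(struct airoha_queue *q, struct page *page)
    {
        if (q->skb) {
            /* the skb already owns the page (marked for recycling), so
             * freeing the skb is enough to return it to the pool */
            dev_kfree_skb(q->skb);
            q->skb = NULL;
        } else {
            /* no skb built yet: return the fragment page directly */
            page_pool_put_full_page(q->page_pool, page, true);
        }
    }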
diff --git a/target/linux/airoha/patches-6.6/073-01-v6.16-net-airoha-npu-Move-memory-allocation-in-airoha_npu_.patch b/target/linux/airoha/patches-6.6/073-01-v6.16-net-airoha-npu-Move-memory-allocation-in-airoha_npu_.patch
deleted file mode 100644 (file)
index f0c41d5..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-From c52918744ee1e49cea86622a2633b9782446428f Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 16 May 2025 09:59:59 +0200
-Subject: [PATCH 1/3] net: airoha: npu: Move memory allocation in
- airoha_npu_send_msg() caller
-
-Move the ppe_mbox_data struct memory allocation from the airoha_npu_send_msg
-routine to its caller. This is a preliminary patch to enable wlan NPU
-offloading and flow counter stats support.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250516-airoha-en7581-flowstats-v2-1-06d5fbf28984@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 126 +++++++++++++----------
- 1 file changed, 72 insertions(+), 54 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -124,17 +124,12 @@ static int airoha_npu_send_msg(struct ai
-       u16 core = 0; /* FIXME */
-       u32 val, offset = core << 4;
-       dma_addr_t dma_addr;
--      void *addr;
-       int ret;
--      addr = kmemdup(p, size, GFP_ATOMIC);
--      if (!addr)
--              return -ENOMEM;
--
--      dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
-+      dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
-       ret = dma_mapping_error(npu->dev, dma_addr);
-       if (ret)
--              goto out;
-+              return ret;
-       spin_lock_bh(&npu->cores[core].lock);
-@@ -155,8 +150,6 @@ static int airoha_npu_send_msg(struct ai
-       spin_unlock_bh(&npu->cores[core].lock);
-       dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
--out:
--      kfree(addr);
-       return ret;
- }
-@@ -261,76 +254,101 @@ static irqreturn_t airoha_npu_wdt_handle
- static int airoha_npu_ppe_init(struct airoha_npu *npu)
- {
--      struct ppe_mbox_data ppe_data = {
--              .func_type = NPU_OP_SET,
--              .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
--              .init_info = {
--                      .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
--                      .wan_mode = QDMA_WAN_ETHER,
--              },
--      };
-+      struct ppe_mbox_data *ppe_data;
-+      int err;
--      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
--                                 sizeof(struct ppe_mbox_data));
-+      ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
-+      if (!ppe_data)
-+              return -ENOMEM;
-+
-+      ppe_data->func_type = NPU_OP_SET;
-+      ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
-+      ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
-+      ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-+      kfree(ppe_data);
-+
-+      return err;
- }
- static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
- {
--      struct ppe_mbox_data ppe_data = {
--              .func_type = NPU_OP_SET,
--              .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
--      };
-+      struct ppe_mbox_data *ppe_data;
-+      int err;
-+
-+      ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
-+      if (!ppe_data)
-+              return -ENOMEM;
--      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
--                                 sizeof(struct ppe_mbox_data));
-+      ppe_data->func_type = NPU_OP_SET;
-+      ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-+      kfree(ppe_data);
-+
-+      return err;
- }
- static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
-                                            dma_addr_t foe_addr,
-                                            int sram_num_entries)
- {
--      struct ppe_mbox_data ppe_data = {
--              .func_type = NPU_OP_SET,
--              .func_id = PPE_FUNC_SET_WAIT_API,
--              .set_info = {
--                      .func_id = PPE_SRAM_RESET_VAL,
--                      .data = foe_addr,
--                      .size = sram_num_entries,
--              },
--      };
-+      struct ppe_mbox_data *ppe_data;
-+      int err;
-+
-+      ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
-+      if (!ppe_data)
-+              return -ENOMEM;
-+
-+      ppe_data->func_type = NPU_OP_SET;
-+      ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
-+      ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
-+      ppe_data->set_info.data = foe_addr;
-+      ppe_data->set_info.size = sram_num_entries;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-+      kfree(ppe_data);
--      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
--                                 sizeof(struct ppe_mbox_data));
-+      return err;
- }
- static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
-                                      dma_addr_t foe_addr,
-                                      u32 entry_size, u32 hash, bool ppe2)
- {
--      struct ppe_mbox_data ppe_data = {
--              .func_type = NPU_OP_SET,
--              .func_id = PPE_FUNC_SET_WAIT_API,
--              .set_info = {
--                      .data = foe_addr,
--                      .size = entry_size,
--              },
--      };
-+      struct ppe_mbox_data *ppe_data;
-       int err;
--      ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
--                                       : PPE_SRAM_SET_ENTRY;
-+      ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
-+      if (!ppe_data)
-+              return -ENOMEM;
-+
-+      ppe_data->func_type = NPU_OP_SET;
-+      ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
-+      ppe_data->set_info.data = foe_addr;
-+      ppe_data->set_info.size = entry_size;
-+      ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
-+                                        : PPE_SRAM_SET_ENTRY;
--      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
--                                sizeof(struct ppe_mbox_data));
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-       if (err)
--              return err;
-+              goto out;
--      ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
--      ppe_data.set_info.data = hash;
--      ppe_data.set_info.size = sizeof(u32);
-+      ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
-+      ppe_data->set_info.data = hash;
-+      ppe_data->set_info.size = sizeof(u32);
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-+out:
-+      kfree(ppe_data);
--      return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
--                                 sizeof(struct ppe_mbox_data));
-+      return err;
- }
- struct airoha_npu *airoha_npu_get(struct device *dev)
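One reason for this move, sketched below with assumed names: the message is handed to the NPU via DMA, so it must live in kmalloc'd memory that can be mapped directly. Because callers previously built the message in an on-stack struct, airoha_npu_send_msg() had to kmemdup() it first; with the callers allocating the buffer themselves, that extra copy disappears.

    #include <linux/dma-mapping.h>

    /* sketch: map the caller-provided heap buffer, no intermediate copy */
    static int send_msg_sketch(struct device *dev, void *msg, size_t size)
    {
        dma_addr_t dma_addr;
        int err;

        dma_addr = dma_map_single(dev, msg, size, DMA_TO_DEVICE);
        err = dma_mapping_error(dev, dma_addr);
        if (err)
            return err;

        /* ... program dma_addr/size into the mailbox registers and wait ... */

        dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
        return 0;
    }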
diff --git a/target/linux/airoha/patches-6.6/073-02-v6.16-net-airoha-Add-FLOW_CLS_STATS-callback-support.patch b/target/linux/airoha/patches-6.6/073-02-v6.16-net-airoha-Add-FLOW_CLS_STATS-callback-support.patch
deleted file mode 100644 (file)
index 584ddb1..0000000
+++ /dev/null
@@ -1,633 +0,0 @@
-From b81e0f2b58be37628b2e12f8dffdd63c84573e75 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 16 May 2025 10:00:00 +0200
-Subject: [PATCH 2/3] net: airoha: Add FLOW_CLS_STATS callback support
-
-Introduce per-flow stats accounting to the flowtable hw offload in
-the airoha_eth driver. Flow stats are split between the PPE and NPU modules:
-- PPE: accounts for the high 32 bits of per-flow stats
-- NPU: accounts for the low 32 bits of per-flow stats
-
-FLOW_CLS_STATS can be enabled or disabled at compile time.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250516-airoha-en7581-flowstats-v2-2-06d5fbf28984@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/Kconfig           |   7 +
- drivers/net/ethernet/airoha/airoha_eth.h      |  33 +++
- drivers/net/ethernet/airoha/airoha_npu.c      |  52 +++-
- drivers/net/ethernet/airoha/airoha_npu.h      |   4 +-
- drivers/net/ethernet/airoha/airoha_ppe.c      | 269 ++++++++++++++++--
- .../net/ethernet/airoha/airoha_ppe_debugfs.c  |   9 +-
- 6 files changed, 354 insertions(+), 20 deletions(-)
-
---- a/drivers/net/ethernet/airoha/Kconfig
-+++ b/drivers/net/ethernet/airoha/Kconfig
-@@ -24,4 +24,11 @@ config NET_AIROHA
-         This driver supports the gigabit ethernet MACs in the
-         Airoha SoC family.
-+config NET_AIROHA_FLOW_STATS
-+      default y
-+      bool "Airoha flow stats"
-+      depends on NET_AIROHA && NET_AIROHA_NPU
-+      help
-+        Enable Airoha flowtable statistics counters.
-+
- endif #NET_VENDOR_AIROHA
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -50,6 +50,14 @@
- #define PPE_NUM                               2
- #define PPE1_SRAM_NUM_ENTRIES         (8 * 1024)
- #define PPE_SRAM_NUM_ENTRIES          (2 * PPE1_SRAM_NUM_ENTRIES)
-+#ifdef CONFIG_NET_AIROHA_FLOW_STATS
-+#define PPE1_STATS_NUM_ENTRIES                (4 * 1024)
-+#else
-+#define PPE1_STATS_NUM_ENTRIES                0
-+#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
-+#define PPE_STATS_NUM_ENTRIES         (2 * PPE1_STATS_NUM_ENTRIES)
-+#define PPE1_SRAM_NUM_DATA_ENTRIES    (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
-+#define PPE_SRAM_NUM_DATA_ENTRIES     (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
- #define PPE_DRAM_NUM_ENTRIES          (16 * 1024)
- #define PPE_NUM_ENTRIES                       (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
- #define PPE_HASH_MASK                 (PPE_NUM_ENTRIES - 1)
-@@ -261,6 +269,8 @@ struct airoha_foe_mac_info {
-       u16 pppoe_id;
-       u16 src_mac_lo;
-+
-+      u32 meter;
- };
- #define AIROHA_FOE_IB1_UNBIND_PREBIND         BIT(24)
-@@ -296,6 +306,11 @@ struct airoha_foe_mac_info {
- #define AIROHA_FOE_TUNNEL                     BIT(6)
- #define AIROHA_FOE_TUNNEL_ID                  GENMASK(5, 0)
-+#define AIROHA_FOE_TUNNEL_MTU                 GENMASK(31, 16)
-+#define AIROHA_FOE_ACNT_GRP3                  GENMASK(15, 9)
-+#define AIROHA_FOE_METER_GRP3                 GENMASK(8, 5)
-+#define AIROHA_FOE_METER_GRP2                 GENMASK(4, 0)
-+
- struct airoha_foe_bridge {
-       u32 dest_mac_hi;
-@@ -379,6 +394,8 @@ struct airoha_foe_ipv6 {
-       u32 ib2;
-       struct airoha_foe_mac_info_common l2;
-+
-+      u32 meter;
- };
- struct airoha_foe_entry {
-@@ -397,6 +414,16 @@ struct airoha_foe_entry {
-       };
- };
-+struct airoha_foe_stats {
-+      u32 bytes;
-+      u32 packets;
-+};
-+
-+struct airoha_foe_stats64 {
-+      u64 bytes;
-+      u64 packets;
-+};
-+
- struct airoha_flow_data {
-       struct ethhdr eth;
-@@ -447,6 +474,7 @@ struct airoha_flow_table_entry {
-       struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
-       u32 hash;
-+      struct airoha_foe_stats64 stats;
-       enum airoha_flow_entry_type type;
-       struct rhash_head node;
-@@ -523,6 +551,9 @@ struct airoha_ppe {
-       struct hlist_head *foe_flow;
-       u16 foe_check_time[PPE_NUM_ENTRIES];
-+      struct airoha_foe_stats *foe_stats;
-+      dma_addr_t foe_stats_dma;
-+
-       struct dentry *debugfs_dir;
- };
-@@ -582,6 +613,8 @@ int airoha_ppe_init(struct airoha_eth *e
- void airoha_ppe_deinit(struct airoha_eth *eth);
- struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-                                                 u32 hash);
-+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
-+                                  struct airoha_foe_stats64 *stats);
- #ifdef CONFIG_DEBUG_FS
- int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -12,6 +12,7 @@
- #include <linux/of_reserved_mem.h>
- #include <linux/regmap.h>
-+#include "airoha_eth.h"
- #include "airoha_npu.h"
- #define NPU_EN7581_FIRMWARE_DATA              "airoha/en7581_npu_data.bin"
-@@ -72,6 +73,7 @@ enum {
-       PPE_FUNC_SET_WAIT_HWNAT_INIT,
-       PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
-       PPE_FUNC_SET_WAIT_API,
-+      PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
- };
- enum {
-@@ -115,6 +117,10 @@ struct ppe_mbox_data {
-                       u32 size;
-                       u32 data;
-               } set_info;
-+              struct {
-+                      u32 npu_stats_addr;
-+                      u32 foe_stats_addr;
-+              } stats_info;
-       };
- };
-@@ -351,7 +357,40 @@ out:
-       return err;
- }
--struct airoha_npu *airoha_npu_get(struct device *dev)
-+static int airoha_npu_stats_setup(struct airoha_npu *npu,
-+                                dma_addr_t foe_stats_addr)
-+{
-+      int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
-+      struct ppe_mbox_data *ppe_data;
-+
-+      if (!size) /* flow stats are disabled */
-+              return 0;
-+
-+      ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
-+      if (!ppe_data)
-+              return -ENOMEM;
-+
-+      ppe_data->func_type = NPU_OP_SET;
-+      ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
-+      ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
-+                                sizeof(*ppe_data));
-+      if (err)
-+              goto out;
-+
-+      npu->stats = devm_ioremap(npu->dev,
-+                                ppe_data->stats_info.npu_stats_addr,
-+                                size);
-+      if (!npu->stats)
-+              err = -ENOMEM;
-+out:
-+      kfree(ppe_data);
-+
-+      return err;
-+}
-+
-+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
- {
-       struct platform_device *pdev;
-       struct device_node *np;
-@@ -389,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct
-               goto error_module_put;
-       }
-+      if (stats_addr) {
-+              int err;
-+
-+              err = airoha_npu_stats_setup(npu, *stats_addr);
-+              if (err) {
-+                      dev_err(dev, "failed to allocate npu stats buffer\n");
-+                      npu = ERR_PTR(err);
-+                      goto error_module_put;
-+              }
-+      }
-+
-       return npu;
- error_module_put:
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -17,6 +17,8 @@ struct airoha_npu {
-               struct work_struct wdt_work;
-       } cores[NPU_NUM_CORES];
-+      struct airoha_foe_stats __iomem *stats;
-+
-       struct {
-               int (*ppe_init)(struct airoha_npu *npu);
-               int (*ppe_deinit)(struct airoha_npu *npu);
-@@ -30,5 +32,5 @@ struct airoha_npu {
-       } ops;
- };
--struct airoha_npu *airoha_npu_get(struct device *dev);
-+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
- void airoha_npu_put(struct airoha_npu *npu);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -102,7 +102,7 @@ static void airoha_ppe_hw_init(struct ai
-       if (airoha_ppe2_is_enabled(eth)) {
-               sram_num_entries =
--                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
-+                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
-               airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-                             PPE_SRAM_TB_NUM_ENTRY_MASK |
-                             PPE_DRAM_TB_NUM_ENTRY_MASK,
-@@ -119,7 +119,7 @@ static void airoha_ppe_hw_init(struct ai
-                                        dram_num_entries));
-       } else {
-               sram_num_entries =
--                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
-+                      PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
-               airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-                             PPE_SRAM_TB_NUM_ENTRY_MASK |
-                             PPE_DRAM_TB_NUM_ENTRY_MASK,
-@@ -417,6 +417,77 @@ static u32 airoha_ppe_foe_get_entry_hash
-       return hash;
- }
-+static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
-+{
-+      if (!airoha_ppe2_is_enabled(ppe->eth))
-+              return hash;
-+
-+      return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
-+                                           : hash;
-+}
-+
-+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
-+                                               struct airoha_npu *npu,
-+                                               int index)
-+{
-+      memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
-+      memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
-+}
-+
-+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
-+                                          struct airoha_npu *npu)
-+{
-+      int i;
-+
-+      for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
-+              airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
-+}
-+
-+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
-+                                           struct airoha_npu *npu,
-+                                           struct airoha_foe_entry *hwe,
-+                                           u32 hash)
-+{
-+      int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
-+      u32 index, pse_port, val, *data, *ib2, *meter;
-+      u8 nbq;
-+
-+      index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
-+      if (index >= PPE_STATS_NUM_ENTRIES)
-+              return;
-+
-+      if (type == PPE_PKT_TYPE_BRIDGE) {
-+              data = &hwe->bridge.data;
-+              ib2 = &hwe->bridge.ib2;
-+              meter = &hwe->bridge.l2.meter;
-+      } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-+              data = &hwe->ipv6.data;
-+              ib2 = &hwe->ipv6.ib2;
-+              meter = &hwe->ipv6.meter;
-+      } else {
-+              data = &hwe->ipv4.data;
-+              ib2 = &hwe->ipv4.ib2;
-+              meter = &hwe->ipv4.l2.meter;
-+      }
-+
-+      airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
-+
-+      val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
-+      *data = (*data & ~AIROHA_FOE_ACTDP) |
-+              FIELD_PREP(AIROHA_FOE_ACTDP, val);
-+
-+      val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
-+                    AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
-+      *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
-+
-+      pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
-+      nbq = pse_port == 1 ? 6 : 5;
-+      *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
-+                AIROHA_FOE_IB2_PSE_QOS);
-+      *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
-+              FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
-+}
-+
- struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-                                                 u32 hash)
- {
-@@ -470,6 +541,8 @@ static int airoha_ppe_foe_commit_entry(s
-       struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
-       u32 ts = airoha_ppe_get_timestamp(ppe);
-       struct airoha_eth *eth = ppe->eth;
-+      struct airoha_npu *npu;
-+      int err = 0;
-       memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
-       wmb();
-@@ -478,25 +551,28 @@ static int airoha_ppe_foe_commit_entry(s
-       e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
-       hwe->ib1 = e->ib1;
-+      rcu_read_lock();
-+
-+      npu = rcu_dereference(eth->npu);
-+      if (!npu) {
-+              err = -ENODEV;
-+              goto unlock;
-+      }
-+
-+      airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
-+
-       if (hash < PPE_SRAM_NUM_ENTRIES) {
-               dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
-               bool ppe2 = airoha_ppe2_is_enabled(eth) &&
-                           hash >= PPE1_SRAM_NUM_ENTRIES;
--              struct airoha_npu *npu;
--              int err = -ENODEV;
--
--              rcu_read_lock();
--              npu = rcu_dereference(eth->npu);
--              if (npu)
--                      err = npu->ops.ppe_foe_commit_entry(npu, addr,
--                                                          sizeof(*hwe), hash,
--                                                          ppe2);
--              rcu_read_unlock();
--              return err;
-+              err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
-+                                                  hash, ppe2);
-       }
-+unlock:
-+      rcu_read_unlock();
--      return 0;
-+      return err;
- }
- static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
-@@ -582,6 +658,7 @@ airoha_ppe_foe_commit_subflow_entry(stru
-               l2->common.etype = ETH_P_IPV6;
-       hwe.bridge.ib2 = e->data.bridge.ib2;
-+      hwe.bridge.data = e->data.bridge.data;
-       airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
-       return 0;
-@@ -681,6 +758,98 @@ static int airoha_ppe_foe_flow_commit_en
-       return 0;
- }
-+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
-+{
-+      u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
-+      u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
-+      int idle;
-+
-+      if (state == AIROHA_FOE_STATE_BIND) {
-+              ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
-+              ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
-+      } else {
-+              ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
-+              now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
-+              ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
-+      }
-+      idle = now - ts;
-+
-+      return idle < 0 ? idle + ts_mask + 1 : idle;
-+}
-+
-+static void
-+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
-+                                  struct airoha_flow_table_entry *e)
-+{
-+      int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
-+      struct airoha_flow_table_entry *iter;
-+      struct hlist_node *n;
-+
-+      lockdep_assert_held(&ppe_lock);
-+
-+      hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
-+              struct airoha_foe_entry *hwe;
-+              u32 ib1, state;
-+              int idle;
-+
-+              hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
-+              ib1 = READ_ONCE(hwe->ib1);
-+
-+              state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
-+              if (state != AIROHA_FOE_STATE_BIND) {
-+                      iter->hash = 0xffff;
-+                      airoha_ppe_foe_remove_flow(ppe, iter);
-+                      continue;
-+              }
-+
-+              idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
-+              if (idle >= min_idle)
-+                      continue;
-+
-+              min_idle = idle;
-+              e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
-+              e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
-+      }
-+}
-+
-+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
-+                                           struct airoha_flow_table_entry *e)
-+{
-+      struct airoha_foe_entry *hwe_p, hwe = {};
-+
-+      spin_lock_bh(&ppe_lock);
-+
-+      if (e->type == FLOW_TYPE_L2) {
-+              airoha_ppe_foe_flow_l2_entry_update(ppe, e);
-+              goto unlock;
-+      }
-+
-+      if (e->hash == 0xffff)
-+              goto unlock;
-+
-+      hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
-+      if (!hwe_p)
-+              goto unlock;
-+
-+      memcpy(&hwe, hwe_p, sizeof(*hwe_p));
-+      if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
-+              e->hash = 0xffff;
-+              goto unlock;
-+      }
-+
-+      e->data.ib1 = hwe.ib1;
-+unlock:
-+      spin_unlock_bh(&ppe_lock);
-+}
-+
-+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
-+                                    struct airoha_flow_table_entry *e)
-+{
-+      airoha_ppe_foe_flow_entry_update(ppe, e);
-+
-+      return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
-+}
-+
- static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
-                                          struct flow_cls_offload *f)
- {
-@@ -896,6 +1065,60 @@ static int airoha_ppe_flow_offload_destr
-       return 0;
- }
-+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
-+                                  struct airoha_foe_stats64 *stats)
-+{
-+      u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
-+      struct airoha_eth *eth = ppe->eth;
-+      struct airoha_npu *npu;
-+
-+      if (index >= PPE_STATS_NUM_ENTRIES)
-+              return;
-+
-+      rcu_read_lock();
-+
-+      npu = rcu_dereference(eth->npu);
-+      if (npu) {
-+              u64 packets = ppe->foe_stats[index].packets;
-+              u64 bytes = ppe->foe_stats[index].bytes;
-+              struct airoha_foe_stats npu_stats;
-+
-+              memcpy_fromio(&npu_stats, &npu->stats[index],
-+                            sizeof(*npu->stats));
-+              stats->packets = packets << 32 | npu_stats.packets;
-+              stats->bytes = bytes << 32 | npu_stats.bytes;
-+      }
-+
-+      rcu_read_unlock();
-+}
-+
-+static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
-+                                       struct flow_cls_offload *f)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      struct airoha_flow_table_entry *e;
-+      u32 idle;
-+
-+      e = rhashtable_lookup(&eth->flow_table, &f->cookie,
-+                            airoha_flow_table_params);
-+      if (!e)
-+              return -ENOENT;
-+
-+      idle = airoha_ppe_entry_idle_time(eth->ppe, e);
-+      f->stats.lastused = jiffies - idle * HZ;
-+
-+      if (e->hash != 0xffff) {
-+              struct airoha_foe_stats64 stats = {};
-+
-+              airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
-+              f->stats.pkts += (stats.packets - e->stats.packets);
-+              f->stats.bytes += (stats.bytes - e->stats.bytes);
-+              e->stats = stats;
-+      }
-+
-+      return 0;
-+}
-+
- static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
-                                      struct flow_cls_offload *f)
- {
-@@ -904,6 +1127,8 @@ static int airoha_ppe_flow_offload_cmd(s
-               return airoha_ppe_flow_offload_replace(port, f);
-       case FLOW_CLS_DESTROY:
-               return airoha_ppe_flow_offload_destroy(port, f);
-+      case FLOW_CLS_STATS:
-+              return airoha_ppe_flow_offload_stats(port, f);
-       default:
-               break;
-       }
-@@ -929,11 +1154,12 @@ static int airoha_ppe_flush_sram_entries
- static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
- {
--      struct airoha_npu *npu = airoha_npu_get(eth->dev);
-+      struct airoha_npu *npu = airoha_npu_get(eth->dev,
-+                                              &eth->ppe->foe_stats_dma);
-       if (IS_ERR(npu)) {
-               request_module("airoha-npu");
--              npu = airoha_npu_get(eth->dev);
-+              npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
-       }
-       return npu;
-@@ -956,6 +1182,8 @@ static int airoha_ppe_offload_setup(stru
-       if (err)
-               goto error_npu_put;
-+      airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
-+
-       rcu_assign_pointer(eth->npu, npu);
-       synchronize_rcu();
-@@ -1027,6 +1255,15 @@ int airoha_ppe_init(struct airoha_eth *e
-       if (!ppe->foe_flow)
-               return -ENOMEM;
-+      foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
-+      if (foe_size) {
-+              ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
-+                                                   &ppe->foe_stats_dma,
-+                                                   GFP_KERNEL);
-+              if (!ppe->foe_stats)
-+                      return -ENOMEM;
-+      }
-+
-       err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
-       if (err)
-               return err;
---- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
-@@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(s
-               u16 *src_port = NULL, *dest_port = NULL;
-               struct airoha_foe_mac_info_common *l2;
-               unsigned char h_source[ETH_ALEN] = {};
-+              struct airoha_foe_stats64 stats = {};
-               unsigned char h_dest[ETH_ALEN];
-               struct airoha_foe_entry *hwe;
-               u32 type, state, ib2, data;
-@@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(s
-                               cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
-               }
-+              airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
-+
-               *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
-               *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
-               *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
-               seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
--                            " vlan=%d,%d ib1=%08x ib2=%08x\n",
-+                            " vlan=%d,%d ib1=%08x ib2=%08x"
-+                            " packets=%llu bytes=%llu\n",
-                          h_source, h_dest, l2->etype, data,
--                         l2->vlan1, l2->vlan2, hwe->ib1, ib2);
-+                         l2->vlan1, l2->vlan2, hwe->ib1, ib2,
-+                         stats.packets, stats.bytes);
-       }
-       return 0;
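A standalone sketch of the counter arithmetic used above: the NPU exposes the low 32 bits of each per-flow counter, the PPE-side shadow array holds the high 32 bits, and FLOW_CLS_STATS reports only the delta since the previous poll, so a wrap of the low word is absorbed by the 64-bit subtraction. This is plain C for illustration, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t combine(uint32_t hi, uint32_t lo)
    {
        return (uint64_t)hi << 32 | lo;
    }

    int main(void)
    {
        uint64_t prev = combine(0, 0xfffffff0);   /* last reported total */
        uint64_t now  = combine(1, 0x00000010);   /* low word wrapped since */

        printf("delta packets: %llu\n",
               (unsigned long long)(now - prev)); /* prints 32 */
        return 0;
    }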
diff --git a/target/linux/airoha/patches-6.6/073-03-v6.16-net-airoha-ppe-Disable-packet-keepalive.patch b/target/linux/airoha/patches-6.6/073-03-v6.16-net-airoha-ppe-Disable-packet-keepalive.patch
deleted file mode 100644 (file)
index 30c74dd..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-From a98326c151ea3d92e9496858cc2dacccd0870941 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 16 May 2025 10:00:01 +0200
-Subject: [PATCH 3/3] net: airoha: ppe: Disable packet keepalive
-
-Since netfilter flowtable entries are now refreshed by flow-stats
-polling, we can disable hw packet keepalive used to periodically send
-packets belonging to offloaded flows to the kernel in order to refresh
-flowtable entries.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250516-airoha-en7581-flowstats-v2-3-06d5fbf28984@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -84,6 +84,7 @@ static void airoha_ppe_hw_init(struct ai
-               airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
-                             PPE_TB_CFG_SEARCH_MISS_MASK |
-+                            PPE_TB_CFG_KEEPALIVE_MASK |
-                             PPE_TB_ENTRY_SIZE_MASK,
-                             FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
-                             FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
diff --git a/target/linux/airoha/patches-6.6/074-01-v6.16-net-airoha-Do-not-store-hfwd-references-in-airoha_qd.patch b/target/linux/airoha/patches-6.6/074-01-v6.16-net-airoha-Do-not-store-hfwd-references-in-airoha_qd.patch
deleted file mode 100644 (file)
index 81d708f..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-From 09aa788f98da3e2f41ce158cc691d6d52e808bc9 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 21 May 2025 09:16:37 +0200
-Subject: [PATCH 1/3] net: airoha: Do not store hfwd references in airoha_qdma
- struct
-
-Since hfwd descriptor and buffer queues are allocated via
-dmam_alloc_coherent() we do not need to store their references
-in airoha_qdma struct. This patch does not introduce any logical changes,
-just code clean-up.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250521-airopha-desc-sram-v3-2-a6e9b085b4f0@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 8 ++------
- drivers/net/ethernet/airoha/airoha_eth.h | 6 ------
- 2 files changed, 2 insertions(+), 12 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1078,17 +1078,13 @@ static int airoha_qdma_init_hfwd_queues(
-       int size;
-       size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
--      qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                            GFP_KERNEL);
--      if (!qdma->hfwd.desc)
-+      if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-               return -ENOMEM;
-       airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-       size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
--      qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
--                                         GFP_KERNEL);
--      if (!qdma->hfwd.q)
-+      if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-               return -ENOMEM;
-       airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -513,12 +513,6 @@ struct airoha_qdma {
-       struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-       struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
--
--      /* descriptor and packet buffers for qdma hw forward */
--      struct {
--              void *desc;
--              void *q;
--      } hfwd;
- };
- struct airoha_gdm_port {
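This cleanup works because dmam_alloc_coherent() is a device-managed (devres) allocation: it is released automatically when the device is unbound, so the driver only needs the DMA address it programs into the hardware, not the CPU-side pointer. A minimal sketch with an assumed register-write step:

    #include <linux/dma-mapping.h>
    #include <linux/io.h>

    static int init_hfwd_desc_sketch(struct device *dev, void __iomem *reg,
                                     size_t size)
    {
        dma_addr_t dma_addr;

        /* freed automatically on driver detach; no pointer kept around */
        if (!dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL))
            return -ENOMEM;

        writel(lower_32_bits(dma_addr), reg); /* hw only needs the bus address */
        return 0;
    }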
diff --git a/target/linux/airoha/patches-6.6/074-02-v6.16-net-airoha-Add-the-capability-to-allocate-hwfd-buffe.patch b/target/linux/airoha/patches-6.6/074-02-v6.16-net-airoha-Add-the-capability-to-allocate-hwfd-buffe.patch
deleted file mode 100644 (file)
index d6f3e94..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-From 3a1ce9e3d01bbf3912c3e3f81cb554d558eb715b Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 21 May 2025 09:16:38 +0200
-Subject: [PATCH 2/3] net: airoha: Add the capability to allocate hwfd buffers
- via reserved-memory
-
-In some configurations QDMA blocks require a contiguous block of
-system memory for the hwfd buffers queue. Introduce the capability to
-allocate the hw forwarding buffers queue via the reserved-memory DTS
-property instead of running dmam_alloc_coherent().
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250521-airopha-desc-sram-v3-3-a6e9b085b4f0@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 33 +++++++++++++++++++++---
- 1 file changed, 30 insertions(+), 3 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -5,6 +5,7 @@
-  */
- #include <linux/of.h>
- #include <linux/of_net.h>
-+#include <linux/of_reserved_mem.h>
- #include <linux/platform_device.h>
- #include <linux/tcp.h>
- #include <linux/u64_stats_sync.h>
-@@ -1073,9 +1074,11 @@ static void airoha_qdma_cleanup_tx_queue
- static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
- {
-       struct airoha_eth *eth = qdma->eth;
-+      int id = qdma - &eth->qdma[0];
-       dma_addr_t dma_addr;
-+      const char *name;
-+      int size, index;
-       u32 status;
--      int size;
-       size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-       if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-@@ -1083,10 +1086,34 @@ static int airoha_qdma_init_hfwd_queues(
-       airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
--      size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
--      if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-+      name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
-+      if (!name)
-               return -ENOMEM;
-+      index = of_property_match_string(eth->dev->of_node,
-+                                       "memory-region-names", name);
-+      if (index >= 0) {
-+              struct reserved_mem *rmem;
-+              struct device_node *np;
-+
-+              /* Consume reserved memory for hw forwarding buffers queue if
-+               * available in the DTS
-+               */
-+              np = of_parse_phandle(eth->dev->of_node, "memory-region",
-+                                    index);
-+              if (!np)
-+                      return -ENODEV;
-+
-+              rmem = of_reserved_mem_lookup(np);
-+              of_node_put(np);
-+              dma_addr = rmem->base;
-+      } else {
-+              size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-+              if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
-+                                       GFP_KERNEL))
-+                      return -ENOMEM;
-+      }
-+
-       airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
-       airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
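The lookup order introduced above, sketched with an assumed helper name (the real code builds the "qdma%d-buf" string per QDMA instance): prefer a reserved-memory carve-out listed in memory-region-names, and only fall back to a coherent allocation when the DTS does not provide one.

    #include <linux/of.h>
    #include <linux/of_reserved_mem.h>
    #include <linux/dma-mapping.h>

    static int hfwd_buf_base_sketch(struct device *dev, const char *name,
                                    size_t size, dma_addr_t *dma_addr)
    {
        int index;

        index = of_property_match_string(dev->of_node, "memory-region-names",
                                         name);
        if (index >= 0) {
            struct device_node *np;
            struct reserved_mem *rmem;

            np = of_parse_phandle(dev->of_node, "memory-region", index);
            if (!np)
                return -ENODEV;

            rmem = of_reserved_mem_lookup(np);
            of_node_put(np);
            if (!rmem)
                return -ENODEV;

            *dma_addr = rmem->base; /* static carve-out, no allocation needed */
            return 0;
        }

        /* no carve-out in the DTS: fall back to a managed coherent buffer */
        if (!dmam_alloc_coherent(dev, size, dma_addr, GFP_KERNEL))
            return -ENOMEM;

        return 0;
    }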
diff --git a/target/linux/airoha/patches-6.6/074-03-v6.16-net-airoha-Add-the-capability-to-allocate-hfwd-descr.patch b/target/linux/airoha/patches-6.6/074-03-v6.16-net-airoha-Add-the-capability-to-allocate-hfwd-descr.patch
deleted file mode 100644 (file)
index a380adf..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-From c683e378c0907e66cee939145edf936c254ff1e3 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 21 May 2025 09:16:39 +0200
-Subject: [PATCH 3/3] net: airoha: Add the capability to allocate hfwd
- descriptors in SRAM
-
-In order to improve packet processing and packet forwarding
-performance, the EN7581 SoC supports consuming SRAM instead of DRAM for
-the hw forwarding descriptors queue.
-For downlink hw-accelerated traffic, request SRAM memory for the hw
-forwarding descriptors queue.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250521-airopha-desc-sram-v3-4-a6e9b085b4f0@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 11 +----------
- drivers/net/ethernet/airoha/airoha_eth.h |  9 +++++++++
- drivers/net/ethernet/airoha/airoha_ppe.c |  6 ++++++
- 3 files changed, 16 insertions(+), 10 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -71,15 +71,6 @@ static void airoha_qdma_irq_disable(stru
-       airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
- }
--static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
--{
--      /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
--       * GDM{2,3,4} can be used as wan port connected to an external
--       * phy module.
--       */
--      return port->id == 1;
--}
--
- static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
- {
-       struct airoha_eth *eth = port->qdma->eth;
-@@ -1125,7 +1116,7 @@ static int airoha_qdma_init_hfwd_queues(
-                       LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-                       HW_FWD_DESC_NUM_MASK,
-                       FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
--                      LMGR_INIT_START);
-+                      LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
-       return read_poll_timeout(airoha_qdma_rr, status,
-                                !(status & LMGR_INIT_START), USEC_PER_MSEC,
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -597,6 +597,15 @@ u32 airoha_rmw(void __iomem *base, u32 o
- #define airoha_qdma_clear(qdma, offset, val)                  \
-       airoha_rmw((qdma)->regs, (offset), (val), 0)
-+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-+{
-+      /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
-+       * GDM{2,3,4} can be used as wan port connected to an external
-+       * phy module.
-+       */
-+      return port->id == 1;
-+}
-+
- bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
-                             struct airoha_gdm_port *port);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -251,6 +251,12 @@ static int airoha_ppe_foe_entry_prepare(
-               else
-                       pse_port = 2; /* uplink relies on GDM2 loopback */
-               val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
-+
-+              /* For downlink traffic consume SRAM memory for hw forwarding
-+               * descriptors queue.
-+               */
-+              if (airhoa_is_lan_gdm_port(port))
-+                      val |= AIROHA_FOE_IB2_FAST_PATH;
-       }
-       if (is_multicast_ether_addr(data->eth.h_dest))
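Two cooperating pieces are involved here, shown below as illustrative fragments taken from the hunks above rather than drop-in code: the descriptor ring manager is switched to SRAM mode once at init time, and only downlink flows (those towards GDM1, the port wired to the LAN DSA switch on EN7581) set the fast-path bit in their FOE entry so they consume the SRAM-backed queue.

    /* init: keep SRAM mode set while (re)starting the descriptor manager */
    airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
                    LMGR_INIT_START | LMGR_SRAM_MODE_MASK | HW_FWD_DESC_NUM_MASK,
                    FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
                    LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

    /* per-flow: only LAN-facing (downlink) traffic takes the SRAM fast path */
    if (airhoa_is_lan_gdm_port(port))  /* GDM1 == lan on EN7581 */
        val |= AIROHA_FOE_IB2_FAST_PATH;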
diff --git a/target/linux/airoha/patches-6.6/075-v6.16-net-airoha-Fix-an-error-handling-path-in-airoha_allo.patch b/target/linux/airoha/patches-6.6/075-v6.16-net-airoha-Fix-an-error-handling-path-in-airoha_allo.patch
deleted file mode 100644 (file)
index 8606cff..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-From c59783780c8ad66f6076a9a7c74df3e006e29519 Mon Sep 17 00:00:00 2001
-From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
-Date: Sat, 24 May 2025 09:29:11 +0200
-Subject: [PATCH] net: airoha: Fix an error handling path in
- airoha_alloc_gdm_port()
-
-If register_netdev() fails, the error handling path of the probe will not
-free the memory allocated by the previous airoha_metadata_dst_alloc() call
-because port->dev->reg_state will not be NETREG_REGISTERED.
-
-So, an explicit airoha_metadata_dst_free() call is needed in this case to
-avoid a memory leak.
-
-Fixes: af3cf757d5c9 ("net: airoha: Move DSA tag in DMA descriptor")
-Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
-Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/1b94b91345017429ed653e2f05d25620dc2823f9.1746715755.git.christophe.jaillet@wanadoo.fr
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2881,7 +2881,15 @@ static int airoha_alloc_gdm_port(struct
-       if (err)
-               return err;
--      return register_netdev(dev);
-+      err = register_netdev(dev);
-+      if (err)
-+              goto free_metadata_dst;
-+
-+      return 0;
-+
-+free_metadata_dst:
-+      airoha_metadata_dst_free(port);
-+      return err;
- }
- static int airoha_probe(struct platform_device *pdev)
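The general shape of the fix above, sketched with assumed names (the example_ types and helpers are placeholders, not driver APIs): anything allocated before register_netdev() must be unwound explicitly when registration fails, because the probe-wide error path only cleans up ports whose netdev actually reached NETREG_REGISTERED.

    #include <linux/netdevice.h>

    struct example_port;                                        /* hypothetical */
    int example_metadata_dst_alloc(struct example_port *port);  /* hypothetical */
    void example_metadata_dst_free(struct example_port *port);  /* hypothetical */

    static int alloc_port_sketch(struct net_device *dev, struct example_port *port)
    {
        int err;

        err = example_metadata_dst_alloc(port);
        if (err)
            return err;

        err = register_netdev(dev);
        if (err)
            goto free_metadata_dst;

        return 0;

    free_metadata_dst:
        /* registration failed, so the probe-wide unwind (which only touches
         * registered netdevs) will never see this port: free it here */
        example_metadata_dst_free(port);
        return err;
    }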
diff --git a/target/linux/airoha/patches-6.6/076-01-v6.16-net-airoha-Initialize-PPE-UPDMEM-source-mac-table.patch b/target/linux/airoha/patches-6.6/076-01-v6.16-net-airoha-Initialize-PPE-UPDMEM-source-mac-table.patch
deleted file mode 100644 (file)
index 334661d..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-From a869d3a5eb011a9cf9bd864f31f5cf27362de8c7 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 2 Jun 2025 12:55:37 +0200
-Subject: [PATCH 1/3] net: airoha: Initialize PPE UPDMEM source-mac table
-
-The UPDMEM source-mac table is a key-value map used to store device mac
-addresses according to the port identifier. The UPDMEM source-mac table is
-used during IPv6 traffic hw acceleration since PPE entries, for space
-constraints, do not contain the full source mac address but just the
-identifier in the UPDMEM source-mac table.
-Configure UPDMEM source-mac table with device mac addresses and set
-the source-mac ID field for PPE IPv6 entries in order to select the
-proper device mac address as source mac for L3 IPv6 hw accelerated traffic.
-
-Fixes: 00a7678310fe ("net: airoha: Introduce flowtable offload support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250602-airoha-flowtable-ipv6-fix-v2-1-3287f8b55214@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.c  |  2 ++
- drivers/net/ethernet/airoha/airoha_eth.h  |  1 +
- drivers/net/ethernet/airoha/airoha_ppe.c  | 26 ++++++++++++++++++++++-
- drivers/net/ethernet/airoha/airoha_regs.h | 10 +++++++++
- 4 files changed, 38 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -84,6 +84,8 @@ static void airoha_set_macaddr(struct ai
-       val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
-       airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
-       airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
-+
-+      airoha_ppe_init_upd_mem(port);
- }
- static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -614,6 +614,7 @@ void airoha_ppe_check_skb(struct airoha_
- int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
- int airoha_ppe_init(struct airoha_eth *eth);
- void airoha_ppe_deinit(struct airoha_eth *eth);
-+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
- struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-                                                 u32 hash);
- void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -223,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(
-       int dsa_port = airoha_get_dsa_port(&dev);
-       struct airoha_foe_mac_info_common *l2;
-       u32 qdata, ports_pad, val;
-+      u8 smac_id = 0xf;
-       memset(hwe, 0, sizeof(*hwe));
-@@ -257,6 +258,8 @@ static int airoha_ppe_foe_entry_prepare(
-                */
-               if (airhoa_is_lan_gdm_port(port))
-                       val |= AIROHA_FOE_IB2_FAST_PATH;
-+
-+              smac_id = port->id;
-       }
-       if (is_multicast_ether_addr(data->eth.h_dest))
-@@ -291,7 +294,7 @@ static int airoha_ppe_foe_entry_prepare(
-               hwe->ipv4.l2.src_mac_lo =
-                       get_unaligned_be16(data->eth.h_source + 4);
-       } else {
--              l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
-+              l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
-       }
-       if (data->vlan.num) {
-@@ -1238,6 +1241,27 @@ void airoha_ppe_check_skb(struct airoha_
-       airoha_ppe_foe_insert_entry(ppe, skb, hash);
- }
-+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
-+{
-+      struct airoha_eth *eth = port->qdma->eth;
-+      struct net_device *dev = port->dev;
-+      const u8 *addr = dev->dev_addr;
-+      u32 val;
-+
-+      val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
-+      airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
-+      airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
-+                   FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
-+                   PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
-+
-+      val = (addr[0] << 8) | addr[1];
-+      airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
-+      airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
-+                   FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
-+                   FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
-+                   PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
-+}
-+
- int airoha_ppe_init(struct airoha_eth *eth)
- {
-       struct airoha_ppe *ppe;
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -313,6 +313,16 @@
- #define REG_PPE_RAM_BASE(_n)                  (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
- #define REG_PPE_RAM_ENTRY(_m, _n)             (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
-+#define REG_UPDMEM_CTRL(_n)                   (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
-+#define PPE_UPDMEM_ACK_MASK                   BIT(31)
-+#define PPE_UPDMEM_ADDR_MASK                  GENMASK(11, 8)
-+#define PPE_UPDMEM_OFFSET_MASK                        GENMASK(7, 4)
-+#define PPE_UPDMEM_SEL_MASK                   GENMASK(3, 2)
-+#define PPE_UPDMEM_WR_MASK                    BIT(1)
-+#define PPE_UPDMEM_REQ_MASK                   BIT(0)
-+
-+#define REG_UPDMEM_DATA(_n)                   (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
-+
- #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)                (GDM_BASE(_n) + 0x280)
- #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)               (GDM_BASE(_n) + 0x284)
- #define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)               (GDM_BASE(_n) + 0x288)
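
The UPDMEM programming in the patch above splits the 6-byte MAC address into two register words: bytes 2-5 are written at UPDMEM offset 0 and bytes 0-1 at offset 1. A standalone sketch of just that packing, using an example address rather than the device's real one, is:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example address only; the driver uses dev->dev_addr. */
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo, hi;

	/* Word written at UPDMEM offset 0: addr[2..5]. */
	lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	/* Word written at UPDMEM offset 1: addr[0..1]. */
	hi = (addr[0] << 8) | addr[1];

	printf("offset 0: 0x%08x\noffset 1: 0x%04x\n", lo, hi);
	return 0;
}
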
diff --git a/target/linux/airoha/patches-6.6/076-02-v6.16-net-airoha-Fix-IPv6-hw-acceleration-in-bridge-mode.patch b/target/linux/airoha/patches-6.6/076-02-v6.16-net-airoha-Fix-IPv6-hw-acceleration-in-bridge-mode.patch
deleted file mode 100644 (file)
index faa7669..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-From 504a577c9b000f9e0e99e1b28616fb4eb369e1ef Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 2 Jun 2025 12:55:38 +0200
-Subject: [PATCH 2/3] net: airoha: Fix IPv6 hw acceleration in bridge mode
-
-ib2 and airoha_foe_mac_info_common do not have the same offsets in the
-airoha_foe_bridge and airoha_foe_ipv6 structures. The current codebase does
-not accelerate IPv6 traffic in bridge mode since the ib2 and l2 info are
-not set properly when copying the airoha_foe_bridge struct into the
-airoha_foe_ipv6 one in the airoha_ppe_foe_commit_subflow_entry routine.
-Fix IPv6 hw acceleration in bridge mode by resolving the ib2 and
-airoha_foe_mac_info_common overwrite in
-airoha_ppe_foe_commit_subflow_entry() and configuring them with proper
-values.
-
-Fixes: cd53f622611f ("net: airoha: Add L2 hw acceleration support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250602-airoha-flowtable-ipv6-fix-v2-2-3287f8b55214@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 23 ++++++++++++-----------
- 1 file changed, 12 insertions(+), 11 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -639,7 +639,6 @@ airoha_ppe_foe_commit_subflow_entry(stru
-       u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
-       struct airoha_foe_entry *hwe_p, hwe;
-       struct airoha_flow_table_entry *f;
--      struct airoha_foe_mac_info *l2;
-       int type;
-       hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
-@@ -656,18 +655,20 @@ airoha_ppe_foe_commit_subflow_entry(stru
-       memcpy(&hwe, hwe_p, sizeof(*hwe_p));
-       hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
--      l2 = &hwe.bridge.l2;
--      memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
-       type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
--      if (type == PPE_PKT_TYPE_IPV4_HNAPT)
--              memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
--                     sizeof(hwe.ipv4.new_tuple));
--      else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
--               l2->common.etype == ETH_P_IP)
--              l2->common.etype = ETH_P_IPV6;
-+      if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-+              memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
-+              hwe.ipv6.ib2 = e->data.bridge.ib2;
-+      } else {
-+              memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
-+                     sizeof(hwe.bridge.l2));
-+              hwe.bridge.ib2 = e->data.bridge.ib2;
-+              if (type == PPE_PKT_TYPE_IPV4_HNAPT)
-+                      memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
-+                             sizeof(hwe.ipv4.new_tuple));
-+      }
--      hwe.bridge.ib2 = e->data.bridge.ib2;
-       hwe.bridge.data = e->data.bridge.data;
-       airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
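
The root cause described above is that the same logical field can sit at different offsets in two members of a union, so writing through the wrong view corrupts the other member. The toy program below illustrates the idea with invented layouts; it is not the real FOE entry definition.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Invented layouts, only to show that the same logical field can live at
 * different offsets in two members of a union (as ib2 does in the real
 * airoha_foe_bridge vs airoha_foe_ipv6 structures).
 */
struct toy_bridge { uint32_t hdr[4]; uint32_t ib2; uint32_t l2[4]; };
struct toy_ipv6   { uint32_t hdr[8]; uint32_t ib2; uint32_t l2[4]; };

union toy_foe {
	struct toy_bridge bridge;
	struct toy_ipv6 ipv6;
};

int main(void)
{
	printf("bridge.ib2 offset: %zu\n", offsetof(union toy_foe, bridge.ib2));
	printf("ipv6.ib2 offset:   %zu\n", offsetof(union toy_foe, ipv6.ib2));
	/* Writing u.bridge.ib2 therefore does not land on u.ipv6.ib2: the copy
	 * has to target the union member that matches the packet type.
	 */
	return 0;
}
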
diff --git a/target/linux/airoha/patches-6.6/076-03-v6.16-net-airoha-Fix-smac_id-configuration-in-bridge-mode.patch b/target/linux/airoha/patches-6.6/076-03-v6.16-net-airoha-Fix-smac_id-configuration-in-bridge-mode.patch
deleted file mode 100644 (file)
index f790d9d..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-From c86fac5365d3a068422beeb508f2741f1a2d734d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 2 Jun 2025 12:55:39 +0200
-Subject: [PATCH 3/3] net: airoha: Fix smac_id configuration in bridge mode
-
-Set the PPE entry smac_id field to 0xf in the airoha_ppe_foe_commit_subflow_entry
-routine for IPv6 traffic in order to instruct the hw to keep the original
-source mac address for IPv6 hw accelerated traffic in bridge mode.
-
-Fixes: cd53f622611f ("net: airoha: Add L2 hw acceleration support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250602-airoha-flowtable-ipv6-fix-v2-3-3287f8b55214@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -660,6 +660,11 @@ airoha_ppe_foe_commit_subflow_entry(stru
-       if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-               memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
-               hwe.ipv6.ib2 = e->data.bridge.ib2;
-+              /* setting smac_id to 0xf instruct the hw to keep original
-+               * source mac address
-+               */
-+              hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
-+                                                  0xf);
-       } else {
-               memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
-                      sizeof(hwe.bridge.l2));
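
The fix above stores the special value 0xf in the SMAC_ID bitfield of src_mac_hi via FIELD_PREP. A userspace approximation of that bitfield packing is sketched below; the GENMASK/FIELD_PREP macros are simplified stand-ins for the kernel helpers, and the bit range chosen for the field is an assumption made only for the demo.

#include <stdio.h>
#include <stdint.h>

/* Userspace approximations of the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

/* Assumed bit range for the SMAC_ID field, for illustration only. */
#define TOY_SMAC_ID_MASK	GENMASK(19, 16)

int main(void)
{
	/* 0xf in the SMAC_ID field tells the hw to keep the original source MAC. */
	uint32_t src_mac_hi = FIELD_PREP(TOY_SMAC_ID_MASK, 0xf);

	printf("src_mac_hi = 0x%08x\n", src_mac_hi);
	return 0;
}
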
diff --git a/target/linux/airoha/patches-6.6/077-v6.17-net-airoha-Add-PPPoE-offload-support.patch b/target/linux/airoha/patches-6.6/077-v6.17-net-airoha-Add-PPPoE-offload-support.patch
deleted file mode 100644 (file)
index 6245f0d..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-From 0097c4195b1d0ca57d15979626c769c74747b5a0 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 9 Jun 2025 22:28:40 +0200
-Subject: [PATCH] net: airoha: Add PPPoE offload support
-
-Introduce flowtable hw acceleration for PPPoE traffic.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250609-b4-airoha-flowtable-pppoe-v1-1-1520fa7711b4@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 31 ++++++++++++++++++------
- 1 file changed, 23 insertions(+), 8 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(
-             FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
-             FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
-             FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
-+            FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
-             AIROHA_FOE_IB1_BIND_TTL;
-       hwe->ib1 = val;
-@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(
-               hwe->ipv6.data = qdata;
-               hwe->ipv6.ib2 = val;
-               l2 = &hwe->ipv6.l2;
-+              l2->etype = ETH_P_IPV6;
-       } else {
-               hwe->ipv4.data = qdata;
-               hwe->ipv4.ib2 = val;
-               l2 = &hwe->ipv4.l2.common;
-+              l2->etype = ETH_P_IP;
-       }
-       l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
-       l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
-       if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
-+              struct airoha_foe_mac_info *mac_info;
-+
-               l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
-               hwe->ipv4.l2.src_mac_lo =
-                       get_unaligned_be16(data->eth.h_source + 4);
-+
-+              mac_info = (struct airoha_foe_mac_info *)l2;
-+              mac_info->pppoe_id = data->pppoe.sid;
-       } else {
--              l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
-+              l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
-+                               FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
-+                                          data->pppoe.sid);
-       }
-       if (data->vlan.num) {
--              l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
-               l2->vlan1 = data->vlan.hdr[0].id;
-               if (data->vlan.num == 2)
-                       l2->vlan2 = data->vlan.hdr[1].id;
--      } else if (dsa_port >= 0) {
--              l2->etype = BIT(15) | BIT(dsa_port);
--      } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
--              l2->etype = ETH_P_IPV6;
--      } else {
--              l2->etype = ETH_P_IP;
-+      }
-+
-+      if (dsa_port >= 0) {
-+              l2->etype = BIT(dsa_port);
-+              l2->etype |= !data->vlan.num ? BIT(15) : 0;
-+      } else if (data->pppoe.num) {
-+              l2->etype = ETH_P_PPP_SES;
-       }
-       return 0;
-@@ -957,6 +967,11 @@ static int airoha_ppe_flow_offload_repla
-               case FLOW_ACTION_VLAN_POP:
-                       break;
-               case FLOW_ACTION_PPPOE_PUSH:
-+                      if (data.pppoe.num == 1 || data.vlan.num == 2)
-+                              return -EOPNOTSUPP;
-+
-+                      data.pppoe.sid = act->pppoe.sid;
-+                      data.pppoe.num++;
-                       break;
-               default:
-                       return -EOPNOTSUPP;
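
After the PPPoE rework above, l2->etype is chosen in a fixed order: the protocol ethertype is set first, then overridden for DSA-tagged ports, and finally for PPPoE sessions when no DSA port is involved. The sketch below restates only that selection order with plain integer inputs instead of the driver's flow structures.

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_PPP_SES	0x8864
#define BIT(n)		(1u << (n))

/* Simplified restatement of the etype selection order: protocol ethertype
 * first, DSA special tag next, PPPoE session ethertype last.
 */
static uint32_t pick_etype(int is_ipv6, int dsa_port, int vlan_num, int pppoe_num)
{
	uint32_t etype = is_ipv6 ? ETH_P_IPV6 : ETH_P_IP;

	if (dsa_port >= 0) {
		etype = BIT(dsa_port);
		etype |= !vlan_num ? BIT(15) : 0;
	} else if (pppoe_num) {
		etype = ETH_P_PPP_SES;
	}

	return etype;
}

int main(void)
{
	printf("plain IPv4:      0x%04x\n", pick_etype(0, -1, 0, 0));
	printf("DSA port 3:      0x%04x\n", pick_etype(0, 3, 0, 0));
	printf("PPPoE over IPv6: 0x%04x\n", pick_etype(1, -1, 0, 1));
	return 0;
}
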
diff --git a/target/linux/airoha/patches-6.6/078-v6.16-net-airoha-Enable-RX-queues-16-31.patch b/target/linux/airoha/patches-6.6/078-v6.16-net-airoha-Enable-RX-queues-16-31.patch
deleted file mode 100644 (file)
index 1550c59..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-From f478d68b653323b691280b40fbd3b8ca1ac75aa2 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 9 Jun 2025 22:40:35 +0200
-Subject: [PATCH] net: airoha: Enable RX queues 16-31
-
-Fix RX_DONE_INT_MASK definition in order to enable RX queues 16-31.
-
-Fixes: f252493e18353 ("net: airoha: Enable multiple IRQ lines support in airoha_eth driver.")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250609-aioha-fix-rx-queue-mask-v1-1-f33706a06fa2@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_regs.h | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_regs.h
-+++ b/drivers/net/ethernet/airoha/airoha_regs.h
-@@ -614,8 +614,9 @@
-        RX19_DONE_INT_MASK | RX18_DONE_INT_MASK |      \
-        RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
--#define RX_DONE_INT_MASK      (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
- #define RX_DONE_HIGH_OFFSET   fls(RX_DONE_HIGH_INT_MASK)
-+#define RX_DONE_INT_MASK      \
-+      ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
- #define INT_RX2_MASK(_n)                              \
-       ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) |        \
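
The fix above rebuilds RX_DONE_INT_MASK so the high-queue half is shifted past the low-queue half instead of overlapping it. The toy program below reproduces only that arithmetic; the 16-bits-per-half values are assumptions for illustration, not the real register layout from airoha_regs.h.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls32(uint32_t v)
{
	return v ? 32 - __builtin_clz(v) : 0;
}

int main(void)
{
	/* Assumed layout for illustration only: RX0..RX15 and RX16..RX31 each
	 * occupy bits 0-15 of their own status word.
	 */
	uint32_t rx_low  = 0x0000ffff;	/* RX0..RX15  done bits */
	uint32_t rx_high = 0x0000ffff;	/* RX16..RX31 done bits */

	uint32_t broken = rx_high | rx_low;			/* halves overlap      */
	uint32_t fixed  = (rx_high << fls32(rx_high)) | rx_low;	/* halves side by side */

	printf("broken combined mask: 0x%08x\n", broken);
	printf("fixed combined mask:  0x%08x\n", fixed);
	return 0;
}
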
diff --git a/target/linux/airoha/patches-6.6/079-v6.16-net-airoha-Always-check-return-value-from-airoha_ppe.patch b/target/linux/airoha/patches-6.6/079-v6.16-net-airoha-Always-check-return-value-from-airoha_ppe.patch
deleted file mode 100644 (file)
index 551e8e3..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-From 78bd03ee1f20a267d2c218884b66041b3508ac9c Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Wed, 18 Jun 2025 09:37:40 +0200
-Subject: [PATCH] net: airoha: Always check return value from
- airoha_ppe_foe_get_entry()
-
-The airoha_ppe_foe_get_entry routine can return NULL, so check that the
-returned pointer is not NULL in airoha_ppe_foe_flow_l2_entry_update().
-
-Fixes: b81e0f2b58be3 ("net: airoha: Add FLOW_CLS_STATS callback support")
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250618-check-ret-from-airoha_ppe_foe_get_entry-v2-1-068dcea3cc66@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -819,8 +819,10 @@ airoha_ppe_foe_flow_l2_entry_update(stru
-               int idle;
-               hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
--              ib1 = READ_ONCE(hwe->ib1);
-+              if (!hwe)
-+                      continue;
-+              ib1 = READ_ONCE(hwe->ib1);
-               state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
-               if (state != AIROHA_FOE_STATE_BIND) {
-                       iter->hash = 0xffff;
diff --git a/target/linux/airoha/patches-6.6/080-01-v6.16-net-airoha-Compute-number-of-descriptors-according-t.patch b/target/linux/airoha/patches-6.6/080-01-v6.16-net-airoha-Compute-number-of-descriptors-according-t.patch
deleted file mode 100644 (file)
index 9d419c3..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-From edf8afeecfbb0b8c1a2edb8c8892d2f759d35321 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 19 Jun 2025 09:07:24 +0200
-Subject: [PATCH 1/2] net: airoha: Compute number of descriptors according to
- reserved memory size
-
-In order to not exceed the reserved memory size for hwfd buffers,
-compute the number of hwfd buffers/descriptors according to the
-reserved memory size and the size of each hwfd buffer (2KB).
-
-Fixes: 3a1ce9e3d01b ("net: airoha: Add the capability to allocate hwfd buffers via reserved-memory")
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250619-airoha-hw-num-desc-v4-1-49600a9b319a@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 21 ++++++++++++---------
- 1 file changed, 12 insertions(+), 9 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1066,19 +1066,13 @@ static void airoha_qdma_cleanup_tx_queue
- static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
- {
-+      int size, index, num_desc = HW_DSCP_NUM;
-       struct airoha_eth *eth = qdma->eth;
-       int id = qdma - &eth->qdma[0];
-       dma_addr_t dma_addr;
-       const char *name;
--      int size, index;
-       u32 status;
--      size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
--      if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
--              return -ENOMEM;
--
--      airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
--
-       name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
-       if (!name)
-               return -ENOMEM;
-@@ -1100,8 +1094,12 @@ static int airoha_qdma_init_hfwd_queues(
-               rmem = of_reserved_mem_lookup(np);
-               of_node_put(np);
-               dma_addr = rmem->base;
-+              /* Compute the number of hw descriptors according to the
-+               * reserved memory size and the payload buffer size
-+               */
-+              num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
-       } else {
--              size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-+              size = AIROHA_MAX_PACKET_SIZE * num_desc;
-               if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
-                                        GFP_KERNEL))
-                       return -ENOMEM;
-@@ -1109,6 +1107,11 @@ static int airoha_qdma_init_hfwd_queues(
-       airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
-+      size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
-+      if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-+              return -ENOMEM;
-+
-+      airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-       airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
-                       HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-                       FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-@@ -1117,7 +1120,7 @@ static int airoha_qdma_init_hfwd_queues(
-       airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
-                       LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-                       HW_FWD_DESC_NUM_MASK,
--                      FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
-+                      FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
-                       LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
-       return read_poll_timeout(airoha_qdma_rr, status,
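
The change above derives the number of hw forwarding descriptors from the reserved-memory region size instead of always using HW_DSCP_NUM. A quick numeric check of that division, assuming a 2KB buffer and an example 1MB region, is:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed values for illustration: 2KB per hwfd buffer and a 1MB
	 * reserved-memory region; the driver reads the region size from
	 * the device tree instead.
	 */
	uint64_t rmem_size = 1u << 20;
	uint32_t buf_size = 2048;

	uint64_t num_desc = rmem_size / buf_size;

	printf("hwfd descriptors: %llu\n", (unsigned long long)num_desc);
	return 0;
}
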
diff --git a/target/linux/airoha/patches-6.6/080-02-v6.16-net-airoha-Differentiate-hwfd-buffer-size-for-QDMA0-.patch b/target/linux/airoha/patches-6.6/080-02-v6.16-net-airoha-Differentiate-hwfd-buffer-size-for-QDMA0-.patch
deleted file mode 100644 (file)
index d47fc43..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-From 7b46bdaec00a675f6fac9d0b01a2105b5746ebe9 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 19 Jun 2025 09:07:25 +0200
-Subject: [PATCH 2/2] net: airoha: Differentiate hwfd buffer size for QDMA0 and
- QDMA1
-
-The EN7581 SoC allows configuring the size and the number of buffers in
-the hwfd payload queue for both QDMA0 and QDMA1.
-In order to reduce the DRAM required for the hwfd buffer queues and
-decrease the memory footprint, differentiate the hwfd buffer size between
-QDMA0 and QDMA1: reduce it to 1KB for QDMA1 (WAN) while maintaining 2KB
-for QDMA0 (LAN).
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/20250619-airoha-hw-num-desc-v4-2-49600a9b319a@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -1069,14 +1069,15 @@ static int airoha_qdma_init_hfwd_queues(
-       int size, index, num_desc = HW_DSCP_NUM;
-       struct airoha_eth *eth = qdma->eth;
-       int id = qdma - &eth->qdma[0];
-+      u32 status, buf_size;
-       dma_addr_t dma_addr;
-       const char *name;
--      u32 status;
-       name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
-       if (!name)
-               return -ENOMEM;
-+      buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
-       index = of_property_match_string(eth->dev->of_node,
-                                        "memory-region-names", name);
-       if (index >= 0) {
-@@ -1097,9 +1098,9 @@ static int airoha_qdma_init_hfwd_queues(
-               /* Compute the number of hw descriptors according to the
-                * reserved memory size and the payload buffer size
-                */
--              num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
-+              num_desc = div_u64(rmem->size, buf_size);
-       } else {
--              size = AIROHA_MAX_PACKET_SIZE * num_desc;
-+              size = buf_size * num_desc;
-               if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
-                                        GFP_KERNEL))
-                       return -ENOMEM;
-@@ -1112,9 +1113,10 @@ static int airoha_qdma_init_hfwd_queues(
-               return -ENOMEM;
-       airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-+      /* QDMA0: 2KB. QDMA1: 1KB */
-       airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
-                       HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
--                      FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-+                      FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
-       airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-                       FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-       airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
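
The follow-up patch above halves the per-buffer size on QDMA1 and encodes the choice in the payload-size register field via !!id. The sketch below repeats only that selection logic, assuming a 2KB base buffer size:

#include <stdio.h>
#include <stdint.h>

#define MAX_PACKET_SIZE 2048u	/* assumed base hwfd buffer size (QDMA0) */

int main(void)
{
	for (int id = 0; id < 2; id++) {
		/* QDMA0 keeps 2KB buffers, QDMA1 drops to 1KB. */
		uint32_t buf_size = id ? MAX_PACKET_SIZE / 2 : MAX_PACKET_SIZE;
		/* The payload-size field ends up 0 for QDMA0 and 1 for QDMA1. */
		int payload_sel = !!id;

		printf("qdma%d: buf_size=%u payload_sel=%d\n", id, buf_size, payload_sel);
	}
	return 0;
}
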
diff --git a/target/linux/airoha/patches-6.6/081-v6.17-net-airoha-Fix-PPE-table-access-in-airoha_ppe_debugf.patch b/target/linux/airoha/patches-6.6/081-v6.17-net-airoha-Fix-PPE-table-access-in-airoha_ppe_debugf.patch
deleted file mode 100644 (file)
index 919b6b4..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 38358fa3cc8e16c6862a3e5c5c233f9f652e3a6d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 31 Jul 2025 12:29:08 +0200
-Subject: [PATCH] net: airoha: Fix PPE table access in
- airoha_ppe_debugfs_foe_show()
-
-In order to avoid any possible race we need to hold the ppe_lock
-spinlock while accessing the hw PPE table. The airoha_ppe_foe_get_entry
-routine is always executed holding ppe_lock except in the
-airoha_ppe_debugfs_foe_show routine. Fix the problem by introducing the
-airoha_ppe_foe_get_entry_locked routine.
-
-Fixes: 3fe15c640f380 ("net: airoha: Introduce PPE debugfs support")
-Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250731-airoha_ppe_foe_get_entry_locked-v2-1-50efbd8c0fd6@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 26 ++++++++++++++++++------
- 1 file changed, 20 insertions(+), 6 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -508,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_up
-               FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
- }
--struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
--                                                u32 hash)
-+static struct airoha_foe_entry *
-+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
- {
-+      lockdep_assert_held(&ppe_lock);
-+
-       if (hash < PPE_SRAM_NUM_ENTRIES) {
-               u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
-               struct airoha_eth *eth = ppe->eth;
-@@ -537,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_
-       return ppe->foe + hash * sizeof(struct airoha_foe_entry);
- }
-+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
-+                                                u32 hash)
-+{
-+      struct airoha_foe_entry *hwe;
-+
-+      spin_lock_bh(&ppe_lock);
-+      hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
-+      spin_unlock_bh(&ppe_lock);
-+
-+      return hwe;
-+}
-+
- static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
-                                        struct airoha_foe_entry *hwe)
- {
-@@ -651,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(stru
-       struct airoha_flow_table_entry *f;
-       int type;
--      hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
-+      hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
-       if (!hwe_p)
-               return -EINVAL;
-@@ -703,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(
-       spin_lock_bh(&ppe_lock);
--      hwe = airoha_ppe_foe_get_entry(ppe, hash);
-+      hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
-       if (!hwe)
-               goto unlock;
-@@ -818,7 +832,7 @@ airoha_ppe_foe_flow_l2_entry_update(stru
-               u32 ib1, state;
-               int idle;
--              hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
-+              hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
-               if (!hwe)
-                       continue;
-@@ -855,7 +869,7 @@ static void airoha_ppe_foe_flow_entry_up
-       if (e->hash == 0xffff)
-               goto unlock;
--      hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
-+      hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
-       if (!hwe_p)
-               goto unlock;
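
The locking rework above follows a common pattern: an internal *_locked helper that expects the lock to be held, plus a public wrapper that takes and releases it. A generic userspace sketch of that pattern with a pthread mutex (not the driver's spinlock code) is:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

/* Callers must already hold table_lock (the driver uses lockdep_assert_held). */
static int table_get_entry_locked(unsigned int idx)
{
	return idx < 4 ? table[idx] : -1;
}

/* Public wrapper: takes the lock around the internal helper, mirroring
 * airoha_ppe_foe_get_entry() calling airoha_ppe_foe_get_entry_locked().
 */
static int table_get_entry(unsigned int idx)
{
	int val;

	pthread_mutex_lock(&table_lock);
	val = table_get_entry_locked(idx);
	pthread_mutex_unlock(&table_lock);

	return val;
}

int main(void)
{
	printf("entry 2 = %d\n", table_get_entry(2));
	return 0;
}
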
diff --git a/target/linux/airoha/patches-6.6/082-v6.17-net-airoha-ppe-Do-not-invalid-PPE-entries-in-case-of.patch b/target/linux/airoha/patches-6.6/082-v6.17-net-airoha-ppe-Do-not-invalid-PPE-entries-in-case-of.patch
deleted file mode 100644 (file)
index eda914a..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-From 9f6b606b6b37e61427412708411e8e04b1a858e8 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 18 Aug 2025 11:58:25 +0200
-Subject: [PATCH] net: airoha: ppe: Do not invalid PPE entries in case of SW
- hash collision
-
-The SW hash computed by the airoha_ppe_foe_get_entry_hash routine (used
-for the foe_flow hlist) can theoretically produce collisions between two
-different HW PPE entries.
-In airoha_ppe_foe_insert_entry(), if such a collision occurs we mark
-the second PPE entry in the list as stale (setting the hw hash to 0xffff).
-Stale entries are no longer updated in the airoha_ppe_foe_flow_entry_update
-routine and so they are removed by Netfilter.
-Fix the problem by not marking the second entry as stale in the
-airoha_ppe_foe_insert_entry routine if we have already inserted the
-brand new entry in the PPE table, and let Netfilter remove real stale
-entries according to their timestamp.
-Please note this is just a theoretical issue spotted while reviewing the
-code and not observed on a running system.
-
-Fixes: cd53f622611f9 ("net: airoha: Add L2 hw acceleration support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250818-airoha-en7581-hash-collision-fix-v1-1-d190c4b53d1c@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -736,10 +736,8 @@ static void airoha_ppe_foe_insert_entry(
-                       continue;
-               }
--              if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
--                      e->hash = 0xffff;
-+              if (!airoha_ppe_foe_compare_entry(e, hwe))
-                       continue;
--              }
-               airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
-               commit_done = true;
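
The behavioural change above is narrow: when two software entries collide on one hash, a non-matching neighbour is now simply skipped rather than marked stale. The loop below loosely mimics that fixed behaviour with an invented two-entry list; it is a restatement of the control flow, not the driver code.

#include <stdio.h>

struct entry { int stale; int matches_hw; };

int main(void)
{
	/* Two SW flow entries hashed into the same bucket; only the second
	 * really corresponds to the hw PPE entry being inserted.
	 */
	struct entry list[2] = { { 0, 0 }, { 0, 1 } };

	for (int i = 0; i < 2; i++) {
		/* Fixed behaviour: a non-matching neighbour is skipped, not
		 * marked stale, so Netfilter can age it out on its own.
		 */
		if (!list[i].matches_hw)
			continue;

		printf("committing entry %d\n", i);
	}

	for (int i = 0; i < 2; i++)
		printf("entry %d stale=%d\n", i, list[i].stale);
	return 0;
}
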
diff --git a/target/linux/airoha/patches-6.6/083-01-v6.13-resource-Add-resource-set-range-and-size-helpers.patch b/target/linux/airoha/patches-6.6/083-01-v6.13-resource-Add-resource-set-range-and-size-helpers.patch
deleted file mode 100644 (file)
index 36a5300..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-From 9fb6fef0fb49124291837af1da5028f79d53f98e Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= <ilpo.jarvinen@linux.intel.com>
-Date: Fri, 14 Jun 2024 13:06:03 +0300
-Subject: [PATCH] resource: Add resource set range and size helpers
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Setting the end address for a resource with a given size lacks a helper and
-is therefore coded manually unlike the getter side which has a helper for
-resource size calculation. Also, almost all callsites that calculate the
-end address for a resource also set the start address right before it like
-this:
-
-  res->start = start_addr;
-  res->end = res->start + size - 1;
-
-Add resource_set_range(res, start_addr, size) that sets the start address
-and calculates the end address to simplify this often repeated fragment.
-
-Also add resource_set_size() for the cases where setting the start address
-of the resource is not necessary but mention in its kerneldoc that
-resource_set_range() is preferred when setting both addresses.
-
-Link: https://lore.kernel.org/r/20240614100606.15830-2-ilpo.jarvinen@linux.intel.com
-Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
-Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
----
- include/linux/ioport.h | 32 ++++++++++++++++++++++++++++++++
- 1 file changed, 32 insertions(+)
-
---- a/include/linux/ioport.h
-+++ b/include/linux/ioport.h
-@@ -216,6 +216,38 @@ struct resource *lookup_resource(struct
- int adjust_resource(struct resource *res, resource_size_t start,
-                   resource_size_t size);
- resource_size_t resource_alignment(struct resource *res);
-+
-+/**
-+ * resource_set_size - Calculate resource end address from size and start
-+ * @res: Resource descriptor
-+ * @size: Size of the resource
-+ *
-+ * Calculate the end address for @res based on @size.
-+ *
-+ * Note: The start address of @res must be set when calling this function.
-+ * Prefer resource_set_range() if setting both the start address and @size.
-+ */
-+static inline void resource_set_size(struct resource *res, resource_size_t size)
-+{
-+      res->end = res->start + size - 1;
-+}
-+
-+/**
-+ * resource_set_range - Set resource start and end addresses
-+ * @res: Resource descriptor
-+ * @start: Start address for the resource
-+ * @size: Size of the resource
-+ *
-+ * Set @res start address and calculate the end address based on @size.
-+ */
-+static inline void resource_set_range(struct resource *res,
-+                                    resource_size_t start,
-+                                    resource_size_t size)
-+{
-+      res->start = start;
-+      resource_set_size(res, size);
-+}
-+
- static inline resource_size_t resource_size(const struct resource *res)
- {
-       return res->end - res->start + 1;
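
The helpers above replace the open-coded res->end = res->start + size - 1 pattern. The same arithmetic can be checked outside the kernel with a stub struct resource, as in the sketch below (the stub is not the kernel's full definition):

#include <stdio.h>
#include <stdint.h>

/* Stub of the kernel's struct resource, keeping only the two address fields. */
struct resource {
	uint64_t start;
	uint64_t end;
};

static void resource_set_size(struct resource *res, uint64_t size)
{
	res->end = res->start + size - 1;
}

static void resource_set_range(struct resource *res, uint64_t start, uint64_t size)
{
	res->start = start;
	resource_set_size(res, size);
}

static uint64_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource res;

	/* Example: a 64KB region starting at 0x80000000. */
	resource_set_range(&res, 0x80000000ull, 0x10000);

	printf("start=0x%llx end=0x%llx size=0x%llx\n",
	       (unsigned long long)res.start,
	       (unsigned long long)res.end,
	       (unsigned long long)resource_size(&res));
	return 0;
}
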
diff --git a/target/linux/airoha/patches-6.6/083-02-v6.16-of-reserved_mem-Add-functions-to-parse-memory-region.patch b/target/linux/airoha/patches-6.6/083-02-v6.16-of-reserved_mem-Add-functions-to-parse-memory-region.patch
deleted file mode 100644 (file)
index cbfec9a..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-From f4fcfdda2fd8834c62dcb9bfddcf1f89d190b70e Mon Sep 17 00:00:00 2001
-From: "Rob Herring (Arm)" <robh@kernel.org>
-Date: Wed, 23 Apr 2025 14:42:13 -0500
-Subject: [PATCH] of: reserved_mem: Add functions to parse "memory-region"
-
-Drivers with "memory-region" properties currently have to do their own
-parsing of "memory-region" properties. The result is all the drivers
-have similar patterns of a call to parse "memory-region" and then get
-the region's address and size. As this is a standard property, it should
-have common functions for drivers to use. Add new functions to count the
-number of regions and retrieve the region's address as a resource.
-
-Reviewed-by: Daniel Baluta <daniel.baluta@nxp.com>
-Acked-by: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
-Link: https://lore.kernel.org/r/20250423-dt-memory-region-v2-v2-1-2fbd6ebd3c88@kernel.org
-Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
----
- drivers/of/of_reserved_mem.c    | 80 +++++++++++++++++++++++++++++++++
- include/linux/of_reserved_mem.h | 26 +++++++++++
- 2 files changed, 106 insertions(+)
-
---- a/drivers/of/of_reserved_mem.c
-+++ b/drivers/of/of_reserved_mem.c
-@@ -12,6 +12,7 @@
- #define pr_fmt(fmt)   "OF: reserved mem: " fmt
- #include <linux/err.h>
-+#include <linux/ioport.h>
- #include <linux/of.h>
- #include <linux/of_fdt.h>
- #include <linux/of_platform.h>
-@@ -514,3 +515,82 @@ struct reserved_mem *of_reserved_mem_loo
-       return NULL;
- }
- EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
-+
-+/**
-+ * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
-+ * @np:               node containing 'memory-region' property
-+ * @idx:      index of 'memory-region' property to lookup
-+ * @res:      Pointer to a struct resource to fill in with reserved region
-+ *
-+ * This function allows drivers to lookup a node's 'memory-region' property
-+ * entries by index and return a struct resource for the entry.
-+ *
-+ * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
-+ * is missing or unavailable, -EINVAL for any other error.
-+ */
-+int of_reserved_mem_region_to_resource(const struct device_node *np,
-+                                     unsigned int idx, struct resource *res)
-+{
-+      struct reserved_mem *rmem;
-+
-+      if (!np)
-+              return -EINVAL;
-+
-+      struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
-+      if (!target || !of_device_is_available(target))
-+              return -ENODEV;
-+
-+      rmem = of_reserved_mem_lookup(target);
-+      if (!rmem)
-+              return -EINVAL;
-+
-+      resource_set_range(res, rmem->base, rmem->size);
-+      res->name = rmem->name;
-+      return 0;
-+}
-+EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
-+
-+/**
-+ * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
-+ * @np:               node containing 'memory-region' property
-+ * @name:     name of 'memory-region' property entry to lookup
-+ * @res:      Pointer to a struct resource to fill in with reserved region
-+ *
-+ * This function allows drivers to lookup a node's 'memory-region' property
-+ * entries by name and return a struct resource for the entry.
-+ *
-+ * Returns 0 on success with @res filled in, or a negative error-code on
-+ * failure.
-+ */
-+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
-+                                            const char *name,
-+                                            struct resource *res)
-+{
-+      int idx;
-+
-+      if (!name)
-+              return -EINVAL;
-+
-+      idx = of_property_match_string(np, "memory-region-names", name);
-+      if (idx < 0)
-+              return idx;
-+
-+      return of_reserved_mem_region_to_resource(np, idx, res);
-+}
-+EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);
-+
-+/**
-+ * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
-+ * @np:               node containing 'memory-region' property
-+ *
-+ * This function allows drivers to retrieve the number of entries for a node's
-+ * 'memory-region' property.
-+ *
-+ * Returns the number of entries on success, or negative error code on a
-+ * malformed property.
-+ */
-+int of_reserved_mem_region_count(const struct device_node *np)
-+{
-+      return of_count_phandle_with_args(np, "memory-region", NULL);
-+}
-+EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
---- a/include/linux/of_reserved_mem.h
-+++ b/include/linux/of_reserved_mem.h
-@@ -7,6 +7,7 @@
- struct of_phandle_args;
- struct reserved_mem_ops;
-+struct resource;
- struct reserved_mem {
-       const char                      *name;
-@@ -40,6 +41,12 @@ int of_reserved_mem_device_init_by_name(
- void of_reserved_mem_device_release(struct device *dev);
- struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
-+int of_reserved_mem_region_to_resource(const struct device_node *np,
-+                                     unsigned int idx, struct resource *res);
-+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
-+                                            const char *name, struct resource *res);
-+int of_reserved_mem_region_count(const struct device_node *np);
-+
- #else
- #define RESERVEDMEM_OF_DECLARE(name, compat, init)                    \
-@@ -64,6 +71,25 @@ static inline struct reserved_mem *of_re
- {
-       return NULL;
- }
-+
-+static inline int of_reserved_mem_region_to_resource(const struct device_node *np,
-+                                                   unsigned int idx,
-+                                                   struct resource *res)
-+{
-+      return -ENOSYS;
-+}
-+
-+static inline int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
-+                                                          const char *name,
-+                                                          struct resource *res)
-+{
-+      return -ENOSYS;
-+}
-+
-+static inline int of_reserved_mem_region_count(const struct device_node *np)
-+{
-+      return 0;
-+}
- #endif
- /**
diff --git a/target/linux/airoha/patches-6.6/084-01-v6.18-net-airoha-npu-Add-NPU-wlan-memory-initialization-co.patch b/target/linux/airoha/patches-6.6/084-01-v6.18-net-airoha-npu-Add-NPU-wlan-memory-initialization-co.patch
deleted file mode 100644 (file)
index 7e9e942..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-From 564923b02c1d2fe02ee789f9849ff79979b63b9f Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:37 +0200
-Subject: [PATCH 1/6] net: airoha: npu: Add NPU wlan memory initialization
- commands
-
-Introduce wlan_init_reserved_memory callback used by MT76 driver during
-NPU wlan offloading setup.
-This is a preliminary patch to enable wlan flowtable offload for EN7581
-SoC with MT76 driver.
-
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-2-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 82 ++++++++++++++++++++++++
- drivers/net/ethernet/airoha/airoha_npu.h | 38 +++++++++++
- 2 files changed, 120 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -124,6 +124,13 @@ struct ppe_mbox_data {
-       };
- };
-+struct wlan_mbox_data {
-+      u32 ifindex:4;
-+      u32 func_type:4;
-+      u32 func_id;
-+      DECLARE_FLEX_ARRAY(u8, d);
-+};
-+
- static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
-                              void *p, int size)
- {
-@@ -390,6 +397,80 @@ out:
-       return err;
- }
-+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
-+                                  enum airoha_npu_wlan_set_cmd func_id,
-+                                  void *data, int data_len, gfp_t gfp)
-+{
-+      struct wlan_mbox_data *wlan_data;
-+      int err, len;
-+
-+      len = sizeof(*wlan_data) + data_len;
-+      wlan_data = kzalloc(len, gfp);
-+      if (!wlan_data)
-+              return -ENOMEM;
-+
-+      wlan_data->ifindex = ifindex;
-+      wlan_data->func_type = NPU_OP_SET;
-+      wlan_data->func_id = func_id;
-+      memcpy(wlan_data->d, data, data_len);
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
-+      kfree(wlan_data);
-+
-+      return err;
-+}
-+
-+static int
-+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
-+                                  int ifindex, const char *name,
-+                                  enum airoha_npu_wlan_set_cmd func_id)
-+{
-+      struct device *dev = npu->dev;
-+      struct resource res;
-+      int err;
-+      u32 val;
-+
-+      err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
-+                                                      &res);
-+      if (err)
-+              return err;
-+
-+      val = res.start;
-+      return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
-+                                      sizeof(val), GFP_KERNEL);
-+}
-+
-+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
-+{
-+      enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
-+      u32 val = 0;
-+      int err;
-+
-+      err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
-+                                     GFP_KERNEL);
-+      if (err)
-+              return err;
-+
-+      cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
-+      err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
-+      if (err)
-+              return err;
-+
-+      cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
-+      err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
-+      if (err)
-+              return err;
-+
-+      cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
-+      err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
-+      if (err)
-+              return err;
-+
-+      cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
-+      return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
-+                                      GFP_KERNEL);
-+}
-+
- struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
- {
-       struct platform_device *pdev;
-@@ -493,6 +574,7 @@ static int airoha_npu_probe(struct platf
-       npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
-       npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
-       npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
-+      npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
-       npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
-       if (IS_ERR(npu->regmap))
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -6,6 +6,43 @@
- #define NPU_NUM_CORES         8
-+enum airoha_npu_wlan_set_cmd {
-+      WLAN_FUNC_SET_WAIT_PCIE_ADDR,
-+      WLAN_FUNC_SET_WAIT_DESC,
-+      WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
-+      WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
-+      WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
-+      WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
-+      WLAN_FUNC_SET_WAIT_DEL_STA,
-+      WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
-+      WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
-+      WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
-+      WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
-+      WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
-+      WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
-+      WLAN_FUNC_SET_WAIT_PCIE_STATE,
-+      WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
-+      WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
-+      WLAN_FUNC_SET_WAIT_BAR_INFO,
-+      WLAN_FUNC_SET_WAIT_FAST_FLAG,
-+      WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
-+      WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
-+      WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
-+      WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
-+      WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
-+      WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
-+      WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
-+      WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
-+      WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
-+      WLAN_FUNC_SET_WAIT_HWNAT_INIT,
-+      WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
-+      WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
-+      WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
-+};
-+
- struct airoha_npu {
-       struct device *dev;
-       struct regmap *regmap;
-@@ -29,6 +66,7 @@ struct airoha_npu {
-                                           dma_addr_t foe_addr,
-                                           u32 entry_size, u32 hash,
-                                           bool ppe2);
-+              int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
-       } ops;
- };
diff --git a/target/linux/airoha/patches-6.6/084-02-v6.18-net-airoha-npu-Add-wlan_-send-get-_msg-NPU-callbacks.patch b/target/linux/airoha/patches-6.6/084-02-v6.18-net-airoha-npu-Add-wlan_-send-get-_msg-NPU-callbacks.patch
deleted file mode 100644 (file)
index 5ff820d..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-From f97fc66185b2004ad5f393f78b3e645009ddd1d0 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:38 +0200
-Subject: [PATCH 2/6] net: airoha: npu: Add wlan_{send,get}_msg NPU callbacks
-
-Introduce wlan_send_msg() and wlan_get_msg() NPU wlan callbacks used
-by the wlan driver (MT76) to initialize NPU module registers in order to
-offload wireless-wired traffic.
-This is a preliminary patch to enable wlan flowtable offload for EN7581
-SoC with MT76 driver.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-3-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 52 ++++++++++++++++++++++++
- drivers/net/ethernet/airoha/airoha_npu.h | 22 ++++++++++
- 2 files changed, 74 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -42,6 +42,22 @@
- #define REG_CR_MBQ8_CTRL(_n)          (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
- #define REG_CR_NPU_MIB(_n)            (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
-+#define NPU_WLAN_BASE_ADDR            0x30d000
-+
-+#define REG_IRQ_STATUS                        (NPU_WLAN_BASE_ADDR + 0x030)
-+#define REG_IRQ_RXDONE(_n)            (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
-+#define NPU_IRQ_RX_MASK(_n)           ((_n) == 1 ? BIT(17) : BIT(16))
-+
-+#define REG_TX_BASE(_n)                       (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
-+#define REG_TX_DSCP_NUM(_n)           (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
-+#define REG_TX_CPU_IDX(_n)            (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
-+#define REG_TX_DMA_IDX(_n)            (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
-+
-+#define REG_RX_BASE(_n)                       (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
-+#define REG_RX_DSCP_NUM(_n)           (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
-+#define REG_RX_CPU_IDX(_n)            (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
-+#define REG_RX_DMA_IDX(_n)            (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
-+
- #define NPU_TIMER_BASE_ADDR           0x310100
- #define REG_WDT_TIMER_CTRL(_n)                (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
- #define WDT_EN_MASK                   BIT(25)
-@@ -420,6 +436,30 @@ static int airoha_npu_wlan_msg_send(stru
-       return err;
- }
-+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
-+                                 enum airoha_npu_wlan_get_cmd func_id,
-+                                 void *data, int data_len, gfp_t gfp)
-+{
-+      struct wlan_mbox_data *wlan_data;
-+      int err, len;
-+
-+      len = sizeof(*wlan_data) + data_len;
-+      wlan_data = kzalloc(len, gfp);
-+      if (!wlan_data)
-+              return -ENOMEM;
-+
-+      wlan_data->ifindex = ifindex;
-+      wlan_data->func_type = NPU_OP_GET;
-+      wlan_data->func_id = func_id;
-+
-+      err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
-+      if (!err)
-+              memcpy(data, wlan_data->d, data_len);
-+      kfree(wlan_data);
-+
-+      return err;
-+}
-+
- static int
- airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
-                                   int ifindex, const char *name,
-@@ -471,6 +511,15 @@ static int airoha_npu_wlan_init_memory(s
-                                       GFP_KERNEL);
- }
-+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
-+                                        bool xmit)
-+{
-+      if (xmit)
-+              return REG_TX_BASE(qid + 2);
-+
-+      return REG_RX_BASE(qid);
-+}
-+
- struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
- {
-       struct platform_device *pdev;
-@@ -575,6 +624,9 @@ static int airoha_npu_probe(struct platf
-       npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
-       npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
-       npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
-+      npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
-+      npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
-+      npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
-       npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
-       if (IS_ERR(npu->regmap))
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -43,6 +43,20 @@ enum airoha_npu_wlan_set_cmd {
-       WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
- };
-+enum airoha_npu_wlan_get_cmd {
-+      WLAN_FUNC_GET_WAIT_NPU_INFO,
-+      WLAN_FUNC_GET_WAIT_LAST_RATE,
-+      WLAN_FUNC_GET_WAIT_COUNTER,
-+      WLAN_FUNC_GET_WAIT_DBG_COUNTER,
-+      WLAN_FUNC_GET_WAIT_RXDESC_BASE,
-+      WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
-+      WLAN_FUNC_GET_WAIT_DMA_ADDR,
-+      WLAN_FUNC_GET_WAIT_RING_SIZE,
-+      WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
-+      WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
-+      WLAN_FUNC_GET_WAIT_NPU_VERSION,
-+};
-+
- struct airoha_npu {
-       struct device *dev;
-       struct regmap *regmap;
-@@ -67,6 +81,14 @@ struct airoha_npu {
-                                           u32 entry_size, u32 hash,
-                                           bool ppe2);
-               int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
-+              int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
-+                                   enum airoha_npu_wlan_set_cmd func_id,
-+                                   void *data, int data_len, gfp_t gfp);
-+              int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
-+                                  enum airoha_npu_wlan_get_cmd func_id,
-+                                  void *data, int data_len, gfp_t gfp);
-+              u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
-+                                         bool xmit);
-       } ops;
- };
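
The mailbox helpers above build a header plus a variable-size payload with a flexible array member and copy the caller's data in (or back out for a GET). The standalone sketch below shows only that allocation pattern; the message fields are loosely modeled on wlan_mbox_data and the transport is stubbed out.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Loosely modeled on wlan_mbox_data: a small header plus a flexible payload. */
struct mbox_msg {
	uint32_t ifindex;
	uint32_t func_id;
	uint8_t d[];		/* flexible array member */
};

/* Stubbed transport: just report what would be handed to the NPU mailbox. */
static int mbox_transport(const struct mbox_msg *msg, size_t len)
{
	printf("sending %zu bytes, func_id=%u, payload[0]=0x%02x\n",
	       len, msg->func_id, msg->d[0]);
	return 0;
}

static int mbox_send(uint32_t ifindex, uint32_t func_id,
		     const void *data, size_t data_len)
{
	size_t len = sizeof(struct mbox_msg) + data_len;
	struct mbox_msg *msg = calloc(1, len);
	int err;

	if (!msg)
		return -1;

	msg->ifindex = ifindex;
	msg->func_id = func_id;
	memcpy(msg->d, data, data_len);

	err = mbox_transport(msg, len);
	free(msg);
	return err;
}

int main(void)
{
	uint32_t val = 0xa5;

	return mbox_send(0, 8, &val, sizeof(val));
}
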
diff --git a/target/linux/airoha/patches-6.6/084-03-v6.18-net-airoha-npu-Add-wlan-irq-management-callbacks.patch b/target/linux/airoha/patches-6.6/084-03-v6.18-net-airoha-npu-Add-wlan-irq-management-callbacks.patch
deleted file mode 100644 (file)
index f05b947..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-From 03b7ca3ee5e1b700c462aed5b6cb88f616d6ba7f Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:39 +0200
-Subject: [PATCH 3/6] net: airoha: npu: Add wlan irq management callbacks
-
-Introduce callbacks used by the MT76 driver to configure NPU SoC
-interrupts. This is a preliminary patch to enable wlan flowtable
-offload for EN7581 SoC with MT76 driver.
-
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-4-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 27 ++++++++++++++++++++++++
- drivers/net/ethernet/airoha/airoha_npu.h |  4 ++++
- 2 files changed, 31 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -520,6 +520,29 @@ static u32 airoha_npu_wlan_queue_addr_ge
-       return REG_RX_BASE(qid);
- }
-+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
-+{
-+      regmap_write(npu->regmap, REG_IRQ_STATUS, val);
-+}
-+
-+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
-+{
-+      u32 val;
-+
-+      regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
-+      return val;
-+}
-+
-+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
-+{
-+      regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
-+}
-+
-+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
-+{
-+      regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
-+}
-+
- struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
- {
-       struct platform_device *pdev;
-@@ -627,6 +650,10 @@ static int airoha_npu_probe(struct platf
-       npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
-       npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
-       npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
-+      npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
-+      npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
-+      npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
-+      npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
-       npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
-       if (IS_ERR(npu->regmap))
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -89,6 +89,10 @@ struct airoha_npu {
-                                   void *data, int data_len, gfp_t gfp);
-               u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
-                                          bool xmit);
-+              void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
-+              u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
-+              void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
-+              void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
-       } ops;
- };
diff --git a/target/linux/airoha/patches-6.6/084-04-v6.18-net-airoha-npu-Read-NPU-wlan-interrupt-lines-from-th.patch b/target/linux/airoha/patches-6.6/084-04-v6.18-net-airoha-npu-Read-NPU-wlan-interrupt-lines-from-th.patch
deleted file mode 100644 (file)
index 234dc8b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-From a1740b16c83729d908c760eaa821f27b51e58a13 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:40 +0200
-Subject: [PATCH 4/6] net: airoha: npu: Read NPU wlan interrupt lines from the
- DTS
-
-Read all NPU wlan IRQ lines from the NPU device-tree node.
-NPU module fires wlan irq lines when the traffic to/from the WiFi NIC is
-not hw accelerated (these interrupts will be consumed by the MT76 driver
-in subsequent patches).
-This is a preliminary patch to enable wlan flowtable offload for EN7581
-SoC.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-5-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 9 +++++++++
- drivers/net/ethernet/airoha/airoha_npu.h | 3 +++
- 2 files changed, 12 insertions(+)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -696,6 +696,15 @@ static int airoha_npu_probe(struct platf
-               INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
-       }
-+      /* wlan IRQ lines */
-+      for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
-+              irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
-+              if (irq < 0)
-+                      return irq;
-+
-+              npu->irqs[i] = irq;
-+      }
-+
-       err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
-       if (err)
-               return err;
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ b/drivers/net/ethernet/airoha/airoha_npu.h
-@@ -5,6 +5,7 @@
-  */
- #define NPU_NUM_CORES         8
-+#define NPU_NUM_IRQ           6
- enum airoha_npu_wlan_set_cmd {
-       WLAN_FUNC_SET_WAIT_PCIE_ADDR,
-@@ -68,6 +69,8 @@ struct airoha_npu {
-               struct work_struct wdt_work;
-       } cores[NPU_NUM_CORES];
-+      int irqs[NPU_NUM_IRQ];
-+
-       struct airoha_foe_stats __iomem *stats;
-       struct {
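
[editor's note] A hedged sketch of how one of those device-tree IRQ lines might later be bound by the wlan glue code. The binding helper, handler argument and devname string are placeholders, not code from the dropped patch; npu->irqs[] and npu->dev are the fields the patch adds and already provides.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/soc/airoha/airoha_offload.h>

/* Hypothetical: attach a handler to wlan IRQ line @i that the patch above
 * read from the device tree. The real consumer is expected to live in the
 * MT76 driver.
 */
static int example_bind_wlan_irq(struct airoha_npu *npu, int i,
				 irq_handler_t handler, void *cookie)
{
	if (i < 0 || i >= ARRAY_SIZE(npu->irqs))
		return -EINVAL;

	return devm_request_irq(npu->dev, npu->irqs[i], handler, 0,
				"airoha-npu-wlan", cookie);
}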
diff --git a/target/linux/airoha/patches-6.6/084-05-v6.18-net-airoha-npu-Enable-core-3-for-WiFi-offloading.patch b/target/linux/airoha/patches-6.6/084-05-v6.18-net-airoha-npu-Enable-core-3-for-WiFi-offloading.patch
deleted file mode 100644 (file)
index c285af2..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-From 29c4a3ce508961a02d185ead2d52699b16d82c6d Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:41 +0200
-Subject: [PATCH 5/6] net: airoha: npu: Enable core 3 for WiFi offloading
-
-NPU core 3 is responsible for WiFi offloading so enable it during NPU
-probe.
-
-Reviewed-by: Simon Horman <horms@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-6-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -726,8 +726,7 @@ static int airoha_npu_probe(struct platf
-       usleep_range(1000, 2000);
-       /* enable NPU cores */
--      /* do not start core3 since it is used for WiFi offloading */
--      regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
-+      regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
-       regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
-       msleep(100);
diff --git a/target/linux/airoha/patches-6.6/084-06-v6.18-net-airoha-Add-airoha_offload.h-header.patch b/target/linux/airoha/patches-6.6/084-06-v6.18-net-airoha-Add-airoha_offload.h-header.patch
deleted file mode 100644 (file)
index ef98c85..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-From b3ef7bdec66fb1813e865fd39d179a93cefd2015 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Mon, 11 Aug 2025 17:31:42 +0200
-Subject: [PATCH 6/6] net: airoha: Add airoha_offload.h header
-
-Move NPU definitions to airoha_offload.h in include/linux/soc/airoha/ in
-order to allow the MT76 driver to access the callback definitions.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250811-airoha-en7581-wlan-offlaod-v7-7-58823603bb4e@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_npu.c  |   2 +-
- drivers/net/ethernet/airoha/airoha_npu.h  | 103 ---------
- drivers/net/ethernet/airoha/airoha_ppe.c  |   2 +-
- include/linux/soc/airoha/airoha_offload.h | 260 ++++++++++++++++++++++
- 4 files changed, 262 insertions(+), 105 deletions(-)
- delete mode 100644 drivers/net/ethernet/airoha/airoha_npu.h
- create mode 100644 include/linux/soc/airoha/airoha_offload.h
-
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -11,9 +11,9 @@
- #include <linux/of_platform.h>
- #include <linux/of_reserved_mem.h>
- #include <linux/regmap.h>
-+#include <linux/soc/airoha/airoha_offload.h>
- #include "airoha_eth.h"
--#include "airoha_npu.h"
- #define NPU_EN7581_FIRMWARE_DATA              "airoha/en7581_npu_data.bin"
- #define NPU_EN7581_FIRMWARE_RV32              "airoha/en7581_npu_rv32.bin"
---- a/drivers/net/ethernet/airoha/airoha_npu.h
-+++ /dev/null
-@@ -1,103 +0,0 @@
--/* SPDX-License-Identifier: GPL-2.0-only */
--/*
-- * Copyright (c) 2025 AIROHA Inc
-- * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-- */
--
--#define NPU_NUM_CORES         8
--#define NPU_NUM_IRQ           6
--
--enum airoha_npu_wlan_set_cmd {
--      WLAN_FUNC_SET_WAIT_PCIE_ADDR,
--      WLAN_FUNC_SET_WAIT_DESC,
--      WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
--      WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
--      WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
--      WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
--      WLAN_FUNC_SET_WAIT_DEL_STA,
--      WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
--      WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
--      WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
--      WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
--      WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
--      WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
--      WLAN_FUNC_SET_WAIT_PCIE_STATE,
--      WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
--      WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
--      WLAN_FUNC_SET_WAIT_BAR_INFO,
--      WLAN_FUNC_SET_WAIT_FAST_FLAG,
--      WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
--      WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
--      WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
--      WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
--      WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
--      WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
--      WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
--      WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
--      WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
--      WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
--      WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
--      WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
--      WLAN_FUNC_SET_WAIT_HWNAT_INIT,
--      WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
--      WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
--      WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
--};
--
--enum airoha_npu_wlan_get_cmd {
--      WLAN_FUNC_GET_WAIT_NPU_INFO,
--      WLAN_FUNC_GET_WAIT_LAST_RATE,
--      WLAN_FUNC_GET_WAIT_COUNTER,
--      WLAN_FUNC_GET_WAIT_DBG_COUNTER,
--      WLAN_FUNC_GET_WAIT_RXDESC_BASE,
--      WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
--      WLAN_FUNC_GET_WAIT_DMA_ADDR,
--      WLAN_FUNC_GET_WAIT_RING_SIZE,
--      WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
--      WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
--      WLAN_FUNC_GET_WAIT_NPU_VERSION,
--};
--
--struct airoha_npu {
--      struct device *dev;
--      struct regmap *regmap;
--
--      struct airoha_npu_core {
--              struct airoha_npu *npu;
--              /* protect concurrent npu memory accesses */
--              spinlock_t lock;
--              struct work_struct wdt_work;
--      } cores[NPU_NUM_CORES];
--
--      int irqs[NPU_NUM_IRQ];
--
--      struct airoha_foe_stats __iomem *stats;
--
--      struct {
--              int (*ppe_init)(struct airoha_npu *npu);
--              int (*ppe_deinit)(struct airoha_npu *npu);
--              int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
--                                            dma_addr_t foe_addr,
--                                            int sram_num_entries);
--              int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
--                                          dma_addr_t foe_addr,
--                                          u32 entry_size, u32 hash,
--                                          bool ppe2);
--              int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
--              int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
--                                   enum airoha_npu_wlan_set_cmd func_id,
--                                   void *data, int data_len, gfp_t gfp);
--              int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
--                                  enum airoha_npu_wlan_get_cmd func_id,
--                                  void *data, int data_len, gfp_t gfp);
--              u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
--                                         bool xmit);
--              void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
--              u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
--              void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
--              void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
--      } ops;
--};
--
--struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
--void airoha_npu_put(struct airoha_npu *npu);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -7,10 +7,10 @@
- #include <linux/ip.h>
- #include <linux/ipv6.h>
- #include <linux/rhashtable.h>
-+#include <linux/soc/airoha/airoha_offload.h>
- #include <net/ipv6.h>
- #include <net/pkt_cls.h>
--#include "airoha_npu.h"
- #include "airoha_regs.h"
- #include "airoha_eth.h"
---- /dev/null
-+++ b/include/linux/soc/airoha/airoha_offload.h
-@@ -0,0 +1,260 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * Copyright (c) 2025 AIROHA Inc
-+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
-+ */
-+#ifndef AIROHA_OFFLOAD_H
-+#define AIROHA_OFFLOAD_H
-+
-+#include <linux/spinlock.h>
-+#include <linux/workqueue.h>
-+
-+#define NPU_NUM_CORES         8
-+#define NPU_NUM_IRQ           6
-+#define NPU_RX0_DESC_NUM      512
-+#define NPU_RX1_DESC_NUM      512
-+
-+/* CTRL */
-+#define NPU_RX_DMA_DESC_LAST_MASK     BIT(29)
-+#define NPU_RX_DMA_DESC_LEN_MASK      GENMASK(28, 15)
-+#define NPU_RX_DMA_DESC_CUR_LEN_MASK  GENMASK(14, 1)
-+#define NPU_RX_DMA_DESC_DONE_MASK     BIT(0)
-+/* INFO */
-+#define NPU_RX_DMA_PKT_COUNT_MASK     GENMASK(31, 28)
-+#define NPU_RX_DMA_PKT_ID_MASK                GENMASK(28, 26)
-+#define NPU_RX_DMA_SRC_PORT_MASK      GENMASK(25, 21)
-+#define NPU_RX_DMA_CRSN_MASK          GENMASK(20, 16)
-+#define NPU_RX_DMA_FOE_ID_MASK                GENMASK(15, 0)
-+/* DATA */
-+#define NPU_RX_DMA_SID_MASK           GENMASK(31, 16)
-+#define NPU_RX_DMA_FRAG_TYPE_MASK     GENMASK(15, 14)
-+#define NPU_RX_DMA_PRIORITY_MASK      GENMASK(13, 10)
-+#define NPU_RX_DMA_RADIO_ID_MASK      GENMASK(9, 6)
-+#define NPU_RX_DMA_VAP_ID_MASK                GENMASK(5, 2)
-+#define NPU_RX_DMA_FRAME_TYPE_MASK    GENMASK(1, 0)
-+
-+struct airoha_npu_rx_dma_desc {
-+      u32 ctrl;
-+      u32 info;
-+      u32 data;
-+      u32 addr;
-+      u64 rsv;
-+} __packed;
-+
-+/* CTRL */
-+#define NPU_TX_DMA_DESC_SCHED_MASK    BIT(31)
-+#define NPU_TX_DMA_DESC_LEN_MASK      GENMASK(30, 18)
-+#define NPU_TX_DMA_DESC_VEND_LEN_MASK GENMASK(17, 1)
-+#define NPU_TX_DMA_DESC_DONE_MASK     BIT(0)
-+
-+#define NPU_TXWI_LEN  192
-+
-+struct airoha_npu_tx_dma_desc {
-+      u32 ctrl;
-+      u32 addr;
-+      u64 rsv;
-+      u8 txwi[NPU_TXWI_LEN];
-+} __packed;
-+
-+enum airoha_npu_wlan_set_cmd {
-+      WLAN_FUNC_SET_WAIT_PCIE_ADDR,
-+      WLAN_FUNC_SET_WAIT_DESC,
-+      WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
-+      WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
-+      WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
-+      WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
-+      WLAN_FUNC_SET_WAIT_DEL_STA,
-+      WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
-+      WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
-+      WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
-+      WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
-+      WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
-+      WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
-+      WLAN_FUNC_SET_WAIT_PCIE_STATE,
-+      WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
-+      WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
-+      WLAN_FUNC_SET_WAIT_BAR_INFO,
-+      WLAN_FUNC_SET_WAIT_FAST_FLAG,
-+      WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
-+      WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
-+      WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
-+      WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
-+      WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
-+      WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
-+      WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
-+      WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
-+      WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
-+      WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
-+      WLAN_FUNC_SET_WAIT_HWNAT_INIT,
-+      WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
-+      WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
-+      WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
-+};
-+
-+enum airoha_npu_wlan_get_cmd {
-+      WLAN_FUNC_GET_WAIT_NPU_INFO,
-+      WLAN_FUNC_GET_WAIT_LAST_RATE,
-+      WLAN_FUNC_GET_WAIT_COUNTER,
-+      WLAN_FUNC_GET_WAIT_DBG_COUNTER,
-+      WLAN_FUNC_GET_WAIT_RXDESC_BASE,
-+      WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
-+      WLAN_FUNC_GET_WAIT_DMA_ADDR,
-+      WLAN_FUNC_GET_WAIT_RING_SIZE,
-+      WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
-+      WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
-+      WLAN_FUNC_GET_WAIT_NPU_VERSION,
-+};
-+
-+struct airoha_npu {
-+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
-+      struct device *dev;
-+      struct regmap *regmap;
-+
-+      struct airoha_npu_core {
-+              struct airoha_npu *npu;
-+              /* protect concurrent npu memory accesses */
-+              spinlock_t lock;
-+              struct work_struct wdt_work;
-+      } cores[NPU_NUM_CORES];
-+
-+      int irqs[NPU_NUM_IRQ];
-+
-+      struct airoha_foe_stats __iomem *stats;
-+
-+      struct {
-+              int (*ppe_init)(struct airoha_npu *npu);
-+              int (*ppe_deinit)(struct airoha_npu *npu);
-+              int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
-+                                            dma_addr_t foe_addr,
-+                                            int sram_num_entries);
-+              int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
-+                                          dma_addr_t foe_addr,
-+                                          u32 entry_size, u32 hash,
-+                                          bool ppe2);
-+              int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
-+              int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
-+                                   enum airoha_npu_wlan_set_cmd func_id,
-+                                   void *data, int data_len, gfp_t gfp);
-+              int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
-+                                  enum airoha_npu_wlan_get_cmd func_id,
-+                                  void *data, int data_len, gfp_t gfp);
-+              u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
-+                                         bool xmit);
-+              void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
-+              u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
-+              void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
-+              void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
-+      } ops;
-+#endif
-+};
-+
-+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
-+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
-+void airoha_npu_put(struct airoha_npu *npu);
-+
-+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
-+{
-+      return npu->ops.wlan_init_reserved_memory(npu);
-+}
-+
-+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
-+                                         int ifindex,
-+                                         enum airoha_npu_wlan_set_cmd cmd,
-+                                         void *data, int data_len, gfp_t gfp)
-+{
-+      return npu->ops.wlan_send_msg(npu, ifindex, cmd, data, data_len, gfp);
-+}
-+
-+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
-+                                        enum airoha_npu_wlan_get_cmd cmd,
-+                                        void *data, int data_len, gfp_t gfp)
-+{
-+      return npu->ops.wlan_get_msg(npu, ifindex, cmd, data, data_len, gfp);
-+}
-+
-+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
-+                                               int qid, bool xmit)
-+{
-+      return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
-+}
-+
-+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
-+                                                u32 val)
-+{
-+      npu->ops.wlan_set_irq_status(npu, val);
-+}
-+
-+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
-+{
-+      return npu->ops.wlan_get_irq_status(npu, q);
-+}
-+
-+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
-+{
-+      npu->ops.wlan_enable_irq(npu, q);
-+}
-+
-+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
-+{
-+      npu->ops.wlan_disable_irq(npu, q);
-+}
-+#else
-+static inline struct airoha_npu *airoha_npu_get(struct device *dev,
-+                                              dma_addr_t *foe_stats_addr)
-+{
-+      return NULL;
-+}
-+
-+static inline void airoha_npu_put(struct airoha_npu *npu)
-+{
-+}
-+
-+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
-+{
-+      return -EOPNOTSUPP;
-+}
-+
-+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
-+                                         int ifindex,
-+                                         enum airoha_npu_wlan_set_cmd cmd,
-+                                         void *data, int data_len, gfp_t gfp)
-+{
-+      return -EOPNOTSUPP;
-+}
-+
-+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
-+                                        enum airoha_npu_wlan_get_cmd cmd,
-+                                        void *data, int data_len, gfp_t gfp)
-+{
-+      return -EOPNOTSUPP;
-+}
-+
-+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
-+                                               int qid, bool xmit)
-+{
-+      return 0;
-+}
-+
-+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
-+                                                u32 val)
-+{
-+}
-+
-+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu,
-+                                               int q)
-+{
-+      return 0;
-+}
-+
-+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
-+{
-+}
-+
-+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
-+{
-+}
-+#endif
-+
-+#endif /* AIROHA_OFFLOAD_H */
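
[editor's note] Since the point of the new public header is to let an external module call these inline wrappers, a consumer-side sketch may help. The function below, its parameters and the payload are assumptions; only the header path, airoha_npu_get()/airoha_npu_put(), the wrapper and the command enum value are taken from the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/soc/airoha/airoha_offload.h>

/* Hypothetical consumer (e.g. a wlan driver probe path): take a reference on
 * the NPU, push one SET command through the inline wrapper and drop the
 * reference again. @win is a placeholder payload.
 */
static int example_npu_set_ba_win(struct device *dev, int ifindex, u32 win)
{
	dma_addr_t stats_addr;
	struct airoha_npu *npu;
	int err;

	npu = airoha_npu_get(dev, &stats_addr);
	if (IS_ERR_OR_NULL(npu))
		return npu ? PTR_ERR(npu) : -ENODEV;

	err = airoha_npu_wlan_send_msg(npu, ifindex,
				       WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
				       &win, sizeof(win), GFP_KERNEL);

	airoha_npu_put(npu);

	return err;
}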
diff --git a/target/linux/airoha/patches-6.6/085-v6.18-net-airoha-Add-wlan-flowtable-TX-offload.patch b/target/linux/airoha/patches-6.6/085-v6.18-net-airoha-Add-wlan-flowtable-TX-offload.patch
deleted file mode 100644 (file)
index ab9a376..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-From a8bdd935d1ddb7186358fb60ffe84253e85340c8 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Thu, 14 Aug 2025 09:51:16 +0200
-Subject: [PATCH] net: airoha: Add wlan flowtable TX offload
-
-Introduce support to offload the traffic received on the ethernet NIC
-and forwarded to the wireless one using HW Packet Processor Engine (PPE)
-capabilities.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250814-airoha-en7581-wlan-tx-offload-v1-1-72e0a312003e@kernel.org
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- drivers/net/ethernet/airoha/airoha_eth.h |  11 +++
- drivers/net/ethernet/airoha/airoha_ppe.c | 103 ++++++++++++++++-------
- 2 files changed, 85 insertions(+), 29 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -252,6 +252,10 @@ enum {
- #define AIROHA_FOE_MAC_SMAC_ID                GENMASK(20, 16)
- #define AIROHA_FOE_MAC_PPPOE_ID               GENMASK(15, 0)
-+#define AIROHA_FOE_MAC_WDMA_QOS               GENMASK(15, 12)
-+#define AIROHA_FOE_MAC_WDMA_BAND      BIT(11)
-+#define AIROHA_FOE_MAC_WDMA_WCID      GENMASK(10, 0)
-+
- struct airoha_foe_mac_info_common {
-       u16 vlan1;
-       u16 etype;
-@@ -481,6 +485,13 @@ struct airoha_flow_table_entry {
-       unsigned long cookie;
- };
-+struct airoha_wdma_info {
-+      u8 idx;
-+      u8 queue;
-+      u16 wcid;
-+      u8 bss;
-+};
-+
- /* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
- #define RX_IRQ0_BANK_PIN_MASK                 0x839f
- #define RX_IRQ1_BANK_PIN_MASK                 0x7fe00000
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -190,6 +190,31 @@ static int airoha_ppe_flow_mangle_ipv4(c
-       return 0;
- }
-+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
-+                                  struct airoha_wdma_info *info)
-+{
-+      struct net_device_path_stack stack;
-+      struct net_device_path *path;
-+      int err;
-+
-+      if (!dev)
-+              return -ENODEV;
-+
-+      err = dev_fill_forward_path(dev, addr, &stack);
-+      if (err)
-+              return err;
-+
-+      path = &stack.path[stack.num_paths - 1];
-+      if (path->type != DEV_PATH_MTK_WDMA)
-+              return -1;
-+
-+      info->idx = path->mtk_wdma.wdma_idx;
-+      info->bss = path->mtk_wdma.bss;
-+      info->wcid = path->mtk_wdma.wcid;
-+
-+      return 0;
-+}
-+
- static int airoha_get_dsa_port(struct net_device **dev)
- {
- #if IS_ENABLED(CONFIG_NET_DSA)
-@@ -220,9 +245,9 @@ static int airoha_ppe_foe_entry_prepare(
-                                       struct airoha_flow_data *data,
-                                       int l4proto)
- {
--      int dsa_port = airoha_get_dsa_port(&dev);
-+      u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
-+      int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
-       struct airoha_foe_mac_info_common *l2;
--      u32 qdata, ports_pad, val;
-       u8 smac_id = 0xf;
-       memset(hwe, 0, sizeof(*hwe));
-@@ -236,31 +261,47 @@ static int airoha_ppe_foe_entry_prepare(
-             AIROHA_FOE_IB1_BIND_TTL;
-       hwe->ib1 = val;
--      val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
--            AIROHA_FOE_IB2_PSE_QOS;
--      if (dsa_port >= 0)
--              val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
--
-+      val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
-       if (dev) {
--              struct airoha_gdm_port *port = netdev_priv(dev);
--              u8 pse_port;
--
--              if (!airoha_is_valid_gdm_port(eth, port))
--                      return -EINVAL;
-+              struct airoha_wdma_info info = {};
--              if (dsa_port >= 0)
--                      pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
--              else
--                      pse_port = 2; /* uplink relies on GDM2 loopback */
--              val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
--
--              /* For downlink traffic consume SRAM memory for hw forwarding
--               * descriptors queue.
--               */
--              if (airhoa_is_lan_gdm_port(port))
--                      val |= AIROHA_FOE_IB2_FAST_PATH;
-+              if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
-+                      val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
-+                             FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
-+                                        FE_PSE_PORT_CDM4);
-+                      qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
-+                      wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
-+                                              info.idx) |
-+                                   FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
-+                                              info.wcid);
-+              } else {
-+                      struct airoha_gdm_port *port = netdev_priv(dev);
-+                      u8 pse_port;
-+
-+                      if (!airoha_is_valid_gdm_port(eth, port))
-+                              return -EINVAL;
-+
-+                      if (dsa_port >= 0)
-+                              pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
-+                                                       : port->id;
-+                      else
-+                              pse_port = 2; /* uplink relies on GDM2
-+                                             * loopback
-+                                             */
-+
-+                      val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
-+                             AIROHA_FOE_IB2_PSE_QOS;
-+                      /* For downlink traffic consume SRAM memory for hw
-+                       * forwarding descriptors queue.
-+                       */
-+                      if (airhoa_is_lan_gdm_port(port))
-+                              val |= AIROHA_FOE_IB2_FAST_PATH;
-+                      if (dsa_port >= 0)
-+                              val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
-+                                                dsa_port);
--              smac_id = port->id;
-+                      smac_id = port->id;
-+              }
-       }
-       if (is_multicast_ether_addr(data->eth.h_dest))
-@@ -272,7 +313,6 @@ static int airoha_ppe_foe_entry_prepare(
-       if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
-               hwe->ipv6.ports = ports_pad;
--      qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
-       if (type == PPE_PKT_TYPE_BRIDGE) {
-               airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
-               hwe->bridge.data = qdata;
-@@ -313,7 +353,9 @@ static int airoha_ppe_foe_entry_prepare(
-                       l2->vlan2 = data->vlan.hdr[1].id;
-       }
--      if (dsa_port >= 0) {
-+      if (wlan_etype >= 0) {
-+              l2->etype = wlan_etype;
-+      } else if (dsa_port >= 0) {
-               l2->etype = BIT(dsa_port);
-               l2->etype |= !data->vlan.num ? BIT(15) : 0;
-       } else if (data->pppoe.num) {
-@@ -490,6 +532,10 @@ static void airoha_ppe_foe_flow_stats_up
-               meter = &hwe->ipv4.l2.meter;
-       }
-+      pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
-+      if (pse_port == FE_PSE_PORT_CDM4)
-+              return;
-+
-       airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
-       val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
-@@ -500,7 +546,6 @@ static void airoha_ppe_foe_flow_stats_up
-                     AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
-       *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
--      pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
-       nbq = pse_port == 1 ? 6 : 5;
-       *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
-                 AIROHA_FOE_IB2_PSE_QOS);
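
[editor's note] The key encoding step of the patch above is small enough to restate in isolation: for WDMA-bound flows the band index and station WCID end up packed into the 16-bit FOE l2 etype field. The helper name below is invented; the masks, the struct and the packing itself come from the patch and assume the definitions from airoha_eth.h.

#include <linux/bitfield.h>
#include <linux/types.h>
#include "airoha_eth.h"	/* airoha_wdma_info and AIROHA_FOE_MAC_WDMA_* masks */

/* Illustrative only: fold the WDMA band index and WCID resolved by
 * airoha_ppe_get_wdma_info() into the FOE l2 etype, as
 * airoha_ppe_foe_entry_prepare() does for CDM4/WDMA egress.
 */
static u16 example_pack_wdma_etype(const struct airoha_wdma_info *info)
{
	return FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND, info->idx) |
	       FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID, info->wcid);
}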
diff --git a/target/linux/airoha/patches-6.6/086-01-v6.18-net-airoha-Rely-on-airoha_eth-struct-in-airoha_ppe_f.patch b/target/linux/airoha/patches-6.6/086-01-v6.18-net-airoha-Rely-on-airoha_eth-struct-in-airoha_ppe_f.patch
deleted file mode 100644 (file)
index cef2922..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-From 524a43c3a0c17fa0a1223eea36751dcba55e5530 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 23 Aug 2025 09:56:02 +0200
-Subject: [PATCH 1/3] net: airoha: Rely on airoha_eth struct in
- airoha_ppe_flow_offload_cmd signature
-
-Rely on airoha_eth struct in airoha_ppe_flow_offload_cmd routine
-signature and in all the called subroutines.
-This is a preliminary patch to introduce flowtable offload for traffic
-received by the wlan NIC and forwarded to the ethernet one.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-1-f78600ec3ed8@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_ppe.c | 20 ++++++++------------
- 1 file changed, 8 insertions(+), 12 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -935,11 +935,10 @@ static int airoha_ppe_entry_idle_time(st
-       return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
- }
--static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
-+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
-                                          struct flow_cls_offload *f)
- {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(f);
--      struct airoha_eth *eth = port->qdma->eth;
-       struct airoha_flow_table_entry *e;
-       struct airoha_flow_data data = {};
-       struct net_device *odev = NULL;
-@@ -1136,10 +1135,9 @@ free_entry:
-       return err;
- }
--static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
-+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
-                                          struct flow_cls_offload *f)
- {
--      struct airoha_eth *eth = port->qdma->eth;
-       struct airoha_flow_table_entry *e;
-       e = rhashtable_lookup(&eth->flow_table, &f->cookie,
-@@ -1182,10 +1180,9 @@ void airoha_ppe_foe_entry_get_stats(stru
-       rcu_read_unlock();
- }
--static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
-+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
-                                        struct flow_cls_offload *f)
- {
--      struct airoha_eth *eth = port->qdma->eth;
-       struct airoha_flow_table_entry *e;
-       u32 idle;
-@@ -1209,16 +1206,16 @@ static int airoha_ppe_flow_offload_stats
-       return 0;
- }
--static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
-+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
-                                      struct flow_cls_offload *f)
- {
-       switch (f->command) {
-       case FLOW_CLS_REPLACE:
--              return airoha_ppe_flow_offload_replace(port, f);
-+              return airoha_ppe_flow_offload_replace(eth, f);
-       case FLOW_CLS_DESTROY:
--              return airoha_ppe_flow_offload_destroy(port, f);
-+              return airoha_ppe_flow_offload_destroy(eth, f);
-       case FLOW_CLS_STATS:
--              return airoha_ppe_flow_offload_stats(port, f);
-+              return airoha_ppe_flow_offload_stats(eth, f);
-       default:
-               break;
-       }
-@@ -1288,7 +1285,6 @@ error_npu_put:
- int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
- {
-       struct airoha_gdm_port *port = netdev_priv(dev);
--      struct flow_cls_offload *cls = type_data;
-       struct airoha_eth *eth = port->qdma->eth;
-       int err = 0;
-@@ -1297,7 +1293,7 @@ int airoha_ppe_setup_tc_block_cb(struct
-       if (!eth->npu)
-               err = airoha_ppe_offload_setup(eth);
-       if (!err)
--              err = airoha_ppe_flow_offload_cmd(port, cls);
-+              err = airoha_ppe_flow_offload_cmd(eth, type_data);
-       mutex_unlock(&flow_offload_mutex);
diff --git a/target/linux/airoha/patches-6.6/086-02-v6.18-net-airoha-Add-airoha_ppe_dev-struct-definition.patch b/target/linux/airoha/patches-6.6/086-02-v6.18-net-airoha-Add-airoha_ppe_dev-struct-definition.patch
deleted file mode 100644 (file)
index 7fa5f9b..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-From f45fc18b6de04483643e8aa2ab97737abfe03d59 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 23 Aug 2025 09:56:03 +0200
-Subject: [PATCH 2/3] net: airoha: Add airoha_ppe_dev struct definition
-
-Introduce airoha_ppe_dev struct as container for PPE offload callbacks
-consumed by the MT76 driver during flowtable offload for traffic
-received by the wlan NIC and forwarded to the wired one.
-Add airoha_ppe_setup_tc_block_cb routine to PPE offload ops for MT76
-driver.
-Rely on airoha_ppe_dev pointer in airoha_ppe_setup_tc_block_cb
-signature.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-2-f78600ec3ed8@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c  |  4 +-
- drivers/net/ethernet/airoha/airoha_eth.h  |  4 +-
- drivers/net/ethernet/airoha/airoha_npu.c  |  1 -
- drivers/net/ethernet/airoha/airoha_ppe.c  | 67 +++++++++++++++++++++--
- include/linux/soc/airoha/airoha_offload.h | 35 ++++++++++++
- 5 files changed, 104 insertions(+), 7 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2602,13 +2602,15 @@ static int airoha_dev_setup_tc_block_cb(
-                                       void *type_data, void *cb_priv)
- {
-       struct net_device *dev = cb_priv;
-+      struct airoha_gdm_port *port = netdev_priv(dev);
-+      struct airoha_eth *eth = port->qdma->eth;
-       if (!tc_can_offload(dev))
-               return -EOPNOTSUPP;
-       switch (type) {
-       case TC_SETUP_CLSFLOWER:
--              return airoha_ppe_setup_tc_block_cb(dev, type_data);
-+              return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
-       case TC_SETUP_CLSMATCHALL:
-               return airoha_dev_tc_matchall(dev, type_data);
-       default:
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -13,6 +13,7 @@
- #include <linux/kernel.h>
- #include <linux/netdevice.h>
- #include <linux/reset.h>
-+#include <linux/soc/airoha/airoha_offload.h>
- #include <net/dsa.h>
- #define AIROHA_MAX_NUM_GDM_PORTS      4
-@@ -546,6 +547,7 @@ struct airoha_gdm_port {
- #define AIROHA_RXD4_FOE_ENTRY         GENMASK(15, 0)
- struct airoha_ppe {
-+      struct airoha_ppe_dev dev;
-       struct airoha_eth *eth;
-       void *foe;
-@@ -622,7 +624,7 @@ bool airoha_is_valid_gdm_port(struct air
- void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
-                         u16 hash);
--int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
-+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
- int airoha_ppe_init(struct airoha_eth *eth);
- void airoha_ppe_deinit(struct airoha_eth *eth);
- void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
---- a/drivers/net/ethernet/airoha/airoha_npu.c
-+++ b/drivers/net/ethernet/airoha/airoha_npu.c
-@@ -11,7 +11,6 @@
- #include <linux/of_platform.h>
- #include <linux/of_reserved_mem.h>
- #include <linux/regmap.h>
--#include <linux/soc/airoha/airoha_offload.h>
- #include "airoha_eth.h"
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -6,8 +6,9 @@
- #include <linux/ip.h>
- #include <linux/ipv6.h>
-+#include <linux/of_platform.h>
-+#include <linux/platform_device.h>
- #include <linux/rhashtable.h>
--#include <linux/soc/airoha/airoha_offload.h>
- #include <net/ipv6.h>
- #include <net/pkt_cls.h>
-@@ -1282,10 +1283,10 @@ error_npu_put:
-       return err;
- }
--int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
-+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
- {
--      struct airoha_gdm_port *port = netdev_priv(dev);
--      struct airoha_eth *eth = port->qdma->eth;
-+      struct airoha_ppe *ppe = dev->priv;
-+      struct airoha_eth *eth = ppe->eth;
-       int err = 0;
-       mutex_lock(&flow_offload_mutex);
-@@ -1338,6 +1339,61 @@ void airoha_ppe_init_upd_mem(struct airo
-                    PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
- }
-+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
-+{
-+      struct platform_device *pdev;
-+      struct device_node *np;
-+      struct airoha_eth *eth;
-+
-+      np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
-+      if (!np)
-+              return ERR_PTR(-ENODEV);
-+
-+      pdev = of_find_device_by_node(np);
-+      if (!pdev) {
-+              dev_err(dev, "cannot find device node %s\n", np->name);
-+              of_node_put(np);
-+              return ERR_PTR(-ENODEV);
-+      }
-+      of_node_put(np);
-+
-+      if (!try_module_get(THIS_MODULE)) {
-+              dev_err(dev, "failed to get the device driver module\n");
-+              goto error_pdev_put;
-+      }
-+
-+      eth = platform_get_drvdata(pdev);
-+      if (!eth)
-+              goto error_module_put;
-+
-+      if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
-+              dev_err(&pdev->dev,
-+                      "failed to create device link to consumer %s\n",
-+                      dev_name(dev));
-+              goto error_module_put;
-+      }
-+
-+      return &eth->ppe->dev;
-+
-+error_module_put:
-+      module_put(THIS_MODULE);
-+error_pdev_put:
-+      platform_device_put(pdev);
-+
-+      return ERR_PTR(-ENODEV);
-+}
-+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
-+
-+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
-+{
-+      struct airoha_ppe *ppe = dev->priv;
-+      struct airoha_eth *eth = ppe->eth;
-+
-+      module_put(THIS_MODULE);
-+      put_device(eth->dev);
-+}
-+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
-+
- int airoha_ppe_init(struct airoha_eth *eth)
- {
-       struct airoha_ppe *ppe;
-@@ -1347,6 +1403,9 @@ int airoha_ppe_init(struct airoha_eth *e
-       if (!ppe)
-               return -ENOMEM;
-+      ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
-+      ppe->dev.priv = ppe;
-+
-       foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
-       ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
-                                      GFP_KERNEL);
---- a/include/linux/soc/airoha/airoha_offload.h
-+++ b/include/linux/soc/airoha/airoha_offload.h
-@@ -9,6 +9,41 @@
- #include <linux/spinlock.h>
- #include <linux/workqueue.h>
-+struct airoha_ppe_dev {
-+      struct {
-+              int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
-+                                       void *type_data);
-+      } ops;
-+
-+      void *priv;
-+};
-+
-+#if (IS_BUILTIN(CONFIG_NET_AIROHA) || IS_MODULE(CONFIG_NET_AIROHA))
-+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev);
-+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev);
-+
-+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
-+                                                 void *type_data)
-+{
-+      return dev->ops.setup_tc_block_cb(dev, type_data);
-+}
-+#else
-+static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
-+{
-+      return NULL;
-+}
-+
-+static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
-+{
-+}
-+
-+static inline int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev,
-+                                             void *type_data)
-+{
-+      return -EOPNOTSUPP;
-+}
-+#endif
-+
- #define NPU_NUM_CORES         8
- #define NPU_NUM_IRQ           6
- #define NPU_RX0_DESC_NUM      512
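
[editor's note] For the consumer side this container is designed for, a hedged sketch: the relay function and its caller are assumptions, while airoha_ppe_get_dev()/airoha_ppe_put_dev() and the setup_tc wrapper are the API added by the patch above.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/airoha/airoha_offload.h>
#include <net/flow_offload.h>

/* Hypothetical wlan-side relay: resolve the PPE handle through the
 * "airoha,eth" phandle and forward one flower command to the ethernet
 * driver. A real consumer would keep the handle from probe to remove
 * rather than taking and dropping it per request.
 */
static int example_wlan_setup_flower(struct device *dev,
				     struct flow_cls_offload *cls)
{
	struct airoha_ppe_dev *ppe_dev;
	int err;

	ppe_dev = airoha_ppe_get_dev(dev);
	if (IS_ERR_OR_NULL(ppe_dev))
		return -EOPNOTSUPP;

	err = airoha_ppe_dev_setup_tc_block_cb(ppe_dev, cls);
	airoha_ppe_put_dev(ppe_dev);

	return err;
}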
diff --git a/target/linux/airoha/patches-6.6/086-03-v6.18-net-airoha-Introduce-check_skb-callback-in-ppe_dev-o.patch b/target/linux/airoha/patches-6.6/086-03-v6.18-net-airoha-Introduce-check_skb-callback-in-ppe_dev-o.patch
deleted file mode 100644 (file)
index 1edc2aa..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-From a7cc1aa151e3a9c0314b995f06102f7763d3bd71 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 23 Aug 2025 09:56:04 +0200
-Subject: [PATCH 3/3] net: airoha: Introduce check_skb callback in ppe_dev ops
-
-Export airoha_ppe_check_skb routine in ppe_dev ops. check_skb callback
-will be used by the MT76 driver in order to offload the traffic received
-by the wlan NIC and forwarded to the ethernet one.
-Add rx_wlan parameter to airoha_ppe_check_skb routine signature.
-
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-3-f78600ec3ed8@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c  |  3 ++-
- drivers/net/ethernet/airoha/airoha_eth.h  |  8 ++------
- drivers/net/ethernet/airoha/airoha_ppe.c  | 25 +++++++++++++----------
- include/linux/soc/airoha/airoha_offload.h | 20 ++++++++++++++++++
- 4 files changed, 38 insertions(+), 18 deletions(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -703,7 +703,8 @@ static int airoha_qdma_rx_process(struct
-               reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
-               if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
--                      airoha_ppe_check_skb(eth->ppe, q->skb, hash);
-+                      airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
-+                                           false);
-               done++;
-               napi_gro_receive(&q->napi, q->skb);
---- a/drivers/net/ethernet/airoha/airoha_eth.h
-+++ b/drivers/net/ethernet/airoha/airoha_eth.h
-@@ -230,10 +230,6 @@ struct airoha_hw_stats {
- };
- enum {
--      PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
--};
--
--enum {
-       AIROHA_FOE_STATE_INVALID,
-       AIROHA_FOE_STATE_UNBIND,
-       AIROHA_FOE_STATE_BIND,
-@@ -622,8 +618,8 @@ static inline bool airhoa_is_lan_gdm_por
- bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
-                             struct airoha_gdm_port *port);
--void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
--                        u16 hash);
-+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
-+                        u16 hash, bool rx_wlan);
- int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
- int airoha_ppe_init(struct airoha_eth *eth);
- void airoha_ppe_deinit(struct airoha_eth *eth);
---- a/drivers/net/ethernet/airoha/airoha_ppe.c
-+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
-@@ -616,7 +616,7 @@ static bool airoha_ppe_foe_compare_entry
- static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
-                                      struct airoha_foe_entry *e,
--                                     u32 hash)
-+                                     u32 hash, bool rx_wlan)
- {
-       struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
-       u32 ts = airoha_ppe_get_timestamp(ppe);
-@@ -639,7 +639,8 @@ static int airoha_ppe_foe_commit_entry(s
-               goto unlock;
-       }
--      airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
-+      if (!rx_wlan)
-+              airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
-       if (hash < PPE_SRAM_NUM_ENTRIES) {
-               dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
-@@ -665,7 +666,7 @@ static void airoha_ppe_foe_remove_flow(s
-               e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
-               e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
-                                         AIROHA_FOE_STATE_INVALID);
--              airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
-+              airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
-               e->hash = 0xffff;
-       }
-       if (e->type == FLOW_TYPE_L2_SUBFLOW) {
-@@ -704,7 +705,7 @@ static void airoha_ppe_foe_flow_remove_e
- static int
- airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
-                                   struct airoha_flow_table_entry *e,
--                                  u32 hash)
-+                                  u32 hash, bool rx_wlan)
- {
-       u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
-       struct airoha_foe_entry *hwe_p, hwe;
-@@ -745,14 +746,14 @@ airoha_ppe_foe_commit_subflow_entry(stru
-       }
-       hwe.bridge.data = e->data.bridge.data;
--      airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
-+      airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
-       return 0;
- }
- static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
-                                       struct sk_buff *skb,
--                                      u32 hash)
-+                                      u32 hash, bool rx_wlan)
- {
-       struct airoha_flow_table_entry *e;
-       struct airoha_foe_bridge br = {};
-@@ -785,7 +786,7 @@ static void airoha_ppe_foe_insert_entry(
-               if (!airoha_ppe_foe_compare_entry(e, hwe))
-                       continue;
--              airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
-+              airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
-               commit_done = true;
-               e->hash = hash;
-       }
-@@ -797,7 +798,7 @@ static void airoha_ppe_foe_insert_entry(
-       e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
-                                  airoha_l2_flow_table_params);
-       if (e)
--              airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
-+              airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
- unlock:
-       spin_unlock_bh(&ppe_lock);
- }
-@@ -1301,9 +1302,10 @@ int airoha_ppe_setup_tc_block_cb(struct
-       return err;
- }
--void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
--                        u16 hash)
-+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
-+                        u16 hash, bool rx_wlan)
- {
-+      struct airoha_ppe *ppe = dev->priv;
-       u16 now, diff;
-       if (hash > PPE_HASH_MASK)
-@@ -1315,7 +1317,7 @@ void airoha_ppe_check_skb(struct airoha_
-               return;
-       ppe->foe_check_time[hash] = now;
--      airoha_ppe_foe_insert_entry(ppe, skb, hash);
-+      airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
- }
- void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
-@@ -1404,6 +1406,7 @@ int airoha_ppe_init(struct airoha_eth *e
-               return -ENOMEM;
-       ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
-+      ppe->dev.ops.check_skb = airoha_ppe_check_skb;
-       ppe->dev.priv = ppe;
-       foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
---- a/include/linux/soc/airoha/airoha_offload.h
-+++ b/include/linux/soc/airoha/airoha_offload.h
-@@ -9,10 +9,17 @@
- #include <linux/spinlock.h>
- #include <linux/workqueue.h>
-+enum {
-+      PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
-+};
-+
- struct airoha_ppe_dev {
-       struct {
-               int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
-                                        void *type_data);
-+              void (*check_skb)(struct airoha_ppe_dev *dev,
-+                                struct sk_buff *skb, u16 hash,
-+                                bool rx_wlan);
-       } ops;
-       void *priv;
-@@ -27,6 +34,13 @@ static inline int airoha_ppe_dev_setup_t
- {
-       return dev->ops.setup_tc_block_cb(dev, type_data);
- }
-+
-+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
-+                                          struct sk_buff *skb,
-+                                          u16 hash, bool rx_wlan)
-+{
-+      dev->ops.check_skb(dev, skb, hash, rx_wlan);
-+}
- #else
- static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
- {
-@@ -42,6 +56,12 @@ static inline int airoha_ppe_setup_tc_bl
- {
-       return -EOPNOTSUPP;
- }
-+
-+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
-+                                          struct sk_buff *skb, u16 hash,
-+                                          bool rx_wlan)
-+{
-+}
- #endif
- #define NPU_NUM_CORES         8
diff --git a/target/linux/airoha/patches-6.6/087-v6.17-pinctrl-airoha-Fix-return-value-in-pinconf-callbacks.patch b/target/linux/airoha/patches-6.6/087-v6.17-pinctrl-airoha-Fix-return-value-in-pinconf-callbacks.patch
deleted file mode 100644 (file)
index f12b941..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-From 563fcd6475931c5c8c652a4dd548256314cc87ed Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Fri, 22 Aug 2025 14:14:18 +0200
-Subject: [PATCH] pinctrl: airoha: Fix return value in pinconf callbacks
-
-Pinctrl stack requires ENOTSUPP error code if the parameter is not
-supported by the pinctrl driver. Fix the returned error code in pinconf
-callbacks if the operation is not supported.
-
-Fixes: 1c8ace2d0725 ("pinctrl: airoha: Add support for EN7581 SoC")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Link: https://lore.kernel.org/20250822-airoha-pinconf-err-val-fix-v1-1-87b4f264ced2@kernel.org
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/pinctrl/mediatek/pinctrl-airoha.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
---- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
-+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
-@@ -2696,7 +2696,7 @@ static int airoha_pinconf_get(struct pin
-               arg = 1;
-               break;
-       default:
--              return -EOPNOTSUPP;
-+              return -ENOTSUPP;
-       }
-       *config = pinconf_to_config_packed(param, arg);
-@@ -2790,7 +2790,7 @@ static int airoha_pinconf_set(struct pin
-                       break;
-               }
-               default:
--                      return -EOPNOTSUPP;
-+                      return -ENOTSUPP;
-               }
-       }
-@@ -2807,10 +2807,10 @@ static int airoha_pinconf_group_get(stru
-               if (airoha_pinconf_get(pctrl_dev,
-                                      airoha_pinctrl_groups[group].pins[i],
-                                      config))
--                      return -EOPNOTSUPP;
-+                      return -ENOTSUPP;
-               if (i && cur_config != *config)
--                      return -EOPNOTSUPP;
-+                      return -ENOTSUPP;
-               cur_config = *config;
-       }
diff --git a/target/linux/airoha/patches-6.6/089-v6.14-net-airoha-Fix-channel-configuration-for-ETS-Qdisc.patch b/target/linux/airoha/patches-6.6/089-v6.14-net-airoha-Fix-channel-configuration-for-ETS-Qdisc.patch
deleted file mode 100644 (file)
index 41f7570..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-From 7d0da8f862340c5f42f0062b8560b8d0971a6ac4 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Tue, 7 Jan 2025 23:26:28 +0100
-Subject: [PATCH] net: airoha: Fix channel configuration for ETS Qdisc
-
-Limit ETS QoS channel to AIROHA_NUM_QOS_CHANNELS in
-airoha_tc_setup_qdisc_ets() in order to align the configured channel to
-the value set in airoha_dev_select_queue().
-
-Fixes: 20bf7d07c956 ("net: airoha: Add sched ETS offload support")
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
-Link: https://patch.msgid.link/20250107-airoha-ets-fix-chan-v1-1-97f66ed3a068@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/airoha/airoha_eth.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/airoha/airoha_eth.c
-+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2184,11 +2184,14 @@ static int airoha_qdma_get_tx_ets_stats(
- static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
-                                    struct tc_ets_qopt_offload *opt)
- {
--      int channel = TC_H_MAJ(opt->handle) >> 16;
-+      int channel;
-       if (opt->parent == TC_H_ROOT)
-               return -EINVAL;
-+      channel = TC_H_MAJ(opt->handle) >> 16;
-+      channel = channel % AIROHA_NUM_QOS_CHANNELS;
-+
-       switch (opt->command) {
-       case TC_ETS_REPLACE:
-               return airoha_qdma_set_tx_ets_sched(port, channel, opt);
diff --git a/target/linux/airoha/patches-6.6/091-01-v6.18-pinctrl-airoha-fix-wrong-PHY-LED-mux-value-for-LED1-.patch b/target/linux/airoha/patches-6.6/091-01-v6.18-pinctrl-airoha-fix-wrong-PHY-LED-mux-value-for-LED1-.patch
deleted file mode 100644 (file)
index f94eab4..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-From af87d38c442c75a40c7d0a7d8c31557e2e6ccf98 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Sun, 25 May 2025 20:22:40 +0200
-Subject: [PATCH 1/2] pinctrl: airoha: fix wrong PHY LED mux value for LED1
- GPIO46
-
-In all the MUX values for LED1 GPIO46 there is a copy-paste error where
-the MUX value is set to LED0_MODE_MASK instead of LED1_MODE_MASK.
-
-This wasn't noticed as there were no boards that made use of the
-secondary PHY LED, but looking at the internal documentation the actual
-value should be LED1_MODE_MASK, similar to the other GPIO entries.
-
-Fix the wrong value to apply the correct MUX configuration.
-
-Cc: stable@vger.kernel.org
-Fixes: 1c8ace2d0725 ("pinctrl: airoha: Add support for EN7581 SoC")
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/pinctrl/mediatek/pinctrl-airoha.c | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
---- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
-+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
-@@ -1746,8 +1746,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[0] = {
-                       AIROHA_FUNC_MUX,
-                       REG_GPIO_2ND_I2C_MODE,
--                      GPIO_LAN3_LED0_MODE_MASK,
--                      GPIO_LAN3_LED0_MODE_MASK
-+                      GPIO_LAN3_LED1_MODE_MASK,
-+                      GPIO_LAN3_LED1_MODE_MASK
-               },
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-@@ -1810,8 +1810,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[0] = {
-                       AIROHA_FUNC_MUX,
-                       REG_GPIO_2ND_I2C_MODE,
--                      GPIO_LAN3_LED0_MODE_MASK,
--                      GPIO_LAN3_LED0_MODE_MASK
-+                      GPIO_LAN3_LED1_MODE_MASK,
-+                      GPIO_LAN3_LED1_MODE_MASK
-               },
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-@@ -1874,8 +1874,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[0] = {
-                       AIROHA_FUNC_MUX,
-                       REG_GPIO_2ND_I2C_MODE,
--                      GPIO_LAN3_LED0_MODE_MASK,
--                      GPIO_LAN3_LED0_MODE_MASK
-+                      GPIO_LAN3_LED1_MODE_MASK,
-+                      GPIO_LAN3_LED1_MODE_MASK
-               },
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
-@@ -1938,8 +1938,8 @@ static const struct airoha_pinctrl_func_
-               .regmap[0] = {
-                       AIROHA_FUNC_MUX,
-                       REG_GPIO_2ND_I2C_MODE,
--                      GPIO_LAN3_LED0_MODE_MASK,
--                      GPIO_LAN3_LED0_MODE_MASK
-+                      GPIO_LAN3_LED1_MODE_MASK,
-+                      GPIO_LAN3_LED1_MODE_MASK
-               },
-               .regmap[1] = {
-                       AIROHA_FUNC_MUX,
diff --git a/target/linux/airoha/patches-6.6/091-02-v6.18-pinctrl-airoha-fix-wrong-MDIO-function-bitmaks.patch b/target/linux/airoha/patches-6.6/091-02-v6.18-pinctrl-airoha-fix-wrong-MDIO-function-bitmaks.patch
deleted file mode 100644 (file)
index 45052b4..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-From 110930eb12699b92f767fc599c7ab467dd42358a Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Tue, 8 Jul 2025 14:49:56 +0200
-Subject: [PATCH 2/2] pinctrl: airoha: fix wrong MDIO function bitmaks
-
-With further testing with an attached Aeonsemi PHY it was discovered that
-the pinctrl MDIO function applied the wrong bitmask. The error was
-probably caused by the confusing documentation related to these bits.
-
-Inspecting what the bootloader actually configures, SGMII_MDIO_MODE is
-never actually set; instead, force-GPIO-enable is set for the two GPIOs
-(gpio 1-2) used as the MDC and MDIO pins.
-
-Applying this configuration permits correct functionality of any
-externally attached PHY.
-
-Cc: stable@vger.kernel.org
-Fixes: 1c8ace2d0725 ("pinctrl: airoha: Add support for EN7581 SoC")
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/pinctrl/mediatek/pinctrl-airoha.c | 15 +++++++++------
- 1 file changed, 9 insertions(+), 6 deletions(-)
-
---- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
-+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
-@@ -102,6 +102,9 @@
- #define JTAG_UDI_EN_MASK                      BIT(4)
- #define JTAG_DFD_EN_MASK                      BIT(3)
-+#define REG_FORCE_GPIO_EN                     0x0228
-+#define FORCE_GPIO_EN(n)                      BIT(n)
-+
- /* LED MAP */
- #define REG_LAN_LED0_MAPPING                  0x027c
- #define REG_LAN_LED1_MAPPING                  0x0280
-@@ -713,16 +716,16 @@ static const struct airoha_pinctrl_func_
-               .name = "mdio",
-               .regmap[0] = {
-                       AIROHA_FUNC_MUX,
--                      REG_GPIO_PON_MODE,
--                      GPIO_SGMII_MDIO_MODE_MASK,
--                      GPIO_SGMII_MDIO_MODE_MASK
--              },
--              .regmap[1] = {
--                      AIROHA_FUNC_MUX,
-                       REG_GPIO_2ND_I2C_MODE,
-                       GPIO_MDC_IO_MASTER_MODE_MODE,
-                       GPIO_MDC_IO_MASTER_MODE_MODE
-               },
-+              .regmap[1] = {
-+                      AIROHA_FUNC_MUX,
-+                      REG_FORCE_GPIO_EN,
-+                      FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2),
-+                      FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2)
-+              },
-               .regmap_size = 2,
-       },
- };
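For illustration only, not part of the deleted patch above: taking the REG_FORCE_GPIO_EN register at offset 0x0228 from the patch context, the corrected "mdio" group amounts to force-enabling GPIO1 and GPIO2 (MDC/MDIO). A hedged sketch of that effect through a regmap handle, with a hypothetical helper name:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Sketch only: force GPIO1/GPIO2 (MDC/MDIO) into GPIO-enable mode, the
 * same effect as the fixed regmap[1] entry in the patch above. */
static int airoha_force_mdio_gpios(struct regmap *map)
{
	/* REG_FORCE_GPIO_EN (0x0228): one FORCE_GPIO_EN(n) bit per line */
	return regmap_set_bits(map, 0x0228, BIT(1) | BIT(2));
}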
diff --git a/target/linux/airoha/patches-6.6/104-i2c-mt7621-optional-reset.patch b/target/linux/airoha/patches-6.6/104-i2c-mt7621-optional-reset.patch
deleted file mode 100644 (file)
index 1fad1bd..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/drivers/i2c/busses/i2c-mt7621.c
-+++ b/drivers/i2c/busses/i2c-mt7621.c
-@@ -85,7 +85,7 @@ static void mtk_i2c_reset(struct mtk_i2c
- {
-       int ret;
--      ret = device_reset(i2c->adap.dev.parent);
-+      ret = device_reset_optional(i2c->adap.dev.parent);
-       if (ret)
-               dev_err(i2c->dev, "I2C reset failed!\n");
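For illustration only, not part of the deleted patch: device_reset_optional() treats a device without a described reset line as success, whereas device_reset() would propagate an error, which is what the one-line change above relies on. A hedged sketch of the pattern, with a hypothetical function name:

#include <linux/device.h>
#include <linux/reset.h>

/* Sketch only: reset the device if a reset line exists, succeed otherwise */
static int example_optional_reset(struct device *dev)
{
	int ret = device_reset_optional(dev);	/* 0 when no reset is described */

	if (ret)
		dev_err(dev, "reset failed: %d\n", ret);
	return ret;
}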
diff --git a/target/linux/airoha/patches-6.6/105-uart-add-en7523-support.patch b/target/linux/airoha/patches-6.6/105-uart-add-en7523-support.patch
deleted file mode 100644 (file)
index 39a4944..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
---- /dev/null
-+++ b/drivers/tty/serial/8250/8250_en7523.c
-@@ -0,0 +1,94 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Airoha EN7523 driver.
-+ *
-+ * Copyright (c) 2022 Genexis Sweden AB
-+ * Author: Benjamin Larsson <benjamin.larsson@genexis.eu>
-+ */
-+#include <linux/clk.h>
-+#include <linux/io.h>
-+#include <linux/module.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_platform.h>
-+#include <linux/pinctrl/consumer.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/serial_8250.h>
-+#include <linux/serial_reg.h>
-+#include <linux/console.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+
-+#include "8250.h"
-+
-+
-+/* The Airoha UART is 16550-compatible except for the baud rate calculation.
-+ *
-+ * crystal_clock = 20 MHz
-+ * xindiv_clock = crystal_clock / clock_div
-+ * (x/y) = XYD, 32 bit register with 16 bits of x and then 16 bits of y
-+ * clock_div = XINCLK_DIVCNT (default set to 10 (0x4)),
-+ *           - 3 bit register [ 1, 2, 4, 8, 10, 12, 16, 20 ]
-+ *
-+ * baud_rate = ((xindiv_clock) * (x/y)) / ([BRDH,BRDL] * 16)
-+ *
-+ * XYD_y seems to need to be larger than XYD_x for things to work.
-+ * Setting [BRDH,BRDL] to [0,1] and XYD_y to 65000 gives even values
-+ * for usual baud rates.
-+ *
-+ * The selected divider needs to satisfy
-+ * 1.8432 MHz <= xindiv_clk <= APB clock / 2
-+ * The clocks are unknown but a divider of value 1 did not work.
-+ *
-+ * Optimally the XYD, BRD and XINCLK_DIVCNT registers could be searched to
-+ * find values that give the least error for every baud rate. But searching
-+ * the space takes time and in practice only a few rates are of interest.
-+ * Since some value combinations do not work, a tested subset is used, giving
-+ * a usable range from 110 to 460800 baud.
-+ */
-+
-+#define CLOCK_DIV_TAB_ELEMS 3
-+#define XYD_Y 65000
-+#define XINDIV_CLOCK 20000000
-+#define UART_BRDL_20M 0x01
-+#define UART_BRDH_20M 0x00
-+
-+static int clock_div_tab[] = { 10, 4, 2};
-+static int clock_div_reg[] = {  4, 2, 1};
-+
-+
-+int en7523_set_uart_baud_rate (struct uart_port *port, unsigned int baud)
-+{
-+      struct uart_8250_port *up = up_to_u8250p(port);
-+      unsigned int xyd_x, nom, denom;
-+      int i;
-+
-+      /* set DLAB to access the baud rate divider registers (BRDH, BRDL) */
-+      serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
-+
-+      /* set baud rate calculation defaults */
-+
-+      /* set BRDIV ([BRDH,BRDL]) to 1 */
-+      serial_port_out(port, UART_BRDL, UART_BRDL_20M);
-+      serial_port_out(port, UART_BRDH, UART_BRDH_20M);
-+
-+      /* calculate XYD_x and XINCLKDR register */
-+
-+      for (i = 0 ; i < CLOCK_DIV_TAB_ELEMS ; i++) {
-+              denom = (XINDIV_CLOCK/40) / clock_div_tab[i];
-+              nom = (baud * (XYD_Y/40));
-+              xyd_x = ((nom/denom) << 4);
-+              if (xyd_x < XYD_Y) break;
-+      }
-+
-+      serial_port_out(port, UART_XINCLKDR, clock_div_reg[i]);
-+      serial_port_out(port, UART_XYD, (xyd_x<<16) | XYD_Y);
-+
-+      /* unset DLAB */
-+      serial_port_out(port, UART_LCR, up->lcr);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL_GPL(en7523_set_uart_baud_rate);
---- a/drivers/tty/serial/8250/8250_of.c
-+++ b/drivers/tty/serial/8250/8250_of.c
-@@ -338,6 +338,7 @@ static const struct of_device_id of_plat
-       { .compatible = "ti,da830-uart", .data = (void *)PORT_DA830, },
-       { .compatible = "nuvoton,wpcm450-uart", .data = (void *)PORT_NPCM, },
-       { .compatible = "nuvoton,npcm750-uart", .data = (void *)PORT_NPCM, },
-+      { .compatible = "airoha,en7523-uart", .data = (void *)PORT_AIROHA, },
-       { /* end of list */ },
- };
- MODULE_DEVICE_TABLE(of, of_platform_serial_table);
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -330,6 +330,14 @@ static const struct serial8250_config ua
-               .rxtrig_bytes   = {1, 8, 16, 30},
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-+      [PORT_AIROHA] = {
-+              .name           = "Airoha 16550",
-+              .fifo_size      = 8,
-+              .tx_loadsz      = 1,
-+              .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
-+              .rxtrig_bytes   = {1, 4},
-+              .flags          = UART_CAP_FIFO,
-+      },
- };
- /* Uart divisor latch read */
-@@ -2888,6 +2896,12 @@ serial8250_do_set_termios(struct uart_po
-       serial8250_set_divisor(port, baud, quot, frac);
-+#ifdef CONFIG_SERIAL_8250_AIROHA
-+      /* Airoha SoCs have custom registers for baud rate settings */
-+      if (port->type == PORT_AIROHA)
-+              en7523_set_uart_baud_rate(port, baud);
-+#endif
-+
-       /*
-        * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
-        * is written without DLAB set, this mode will be disabled.
---- a/drivers/tty/serial/8250/Kconfig
-+++ b/drivers/tty/serial/8250/Kconfig
-@@ -354,6 +354,16 @@ config SERIAL_8250_ACORN
-         system, say Y to this option.  The driver can handle 1, 2, or 3 port
-         cards.  If unsure, say N.
-+config SERIAL_8250_AIROHA
-+      tristate "Airoha UART support"
-+      depends on (ARCH_AIROHA || COMPILE_TEST) && OF && SERIAL_8250
-+      help
-+        Selecting this option enables an Airoha SoC specific baud rate
-+        calculation routine on otherwise 16550-compatible UART hardware.
-+
-+        If you have an Airoha based board and want to use the serial port,
-+        say Y to this option. If unsure, say N.
-+
- config SERIAL_8250_BCM2835AUX
-       tristate "BCM2835 auxiliar mini UART support"
-       depends on ARCH_BCM2835 || COMPILE_TEST
---- a/drivers/tty/serial/8250/Makefile
-+++ b/drivers/tty/serial/8250/Makefile
-@@ -46,6 +46,7 @@ obj-$(CONFIG_SERIAL_8250_PERICOM)    += 825
- obj-$(CONFIG_SERIAL_8250_PXA)         += 8250_pxa.o
- obj-$(CONFIG_SERIAL_8250_TEGRA)               += 8250_tegra.o
- obj-$(CONFIG_SERIAL_8250_BCM7271)     += 8250_bcm7271.o
-+obj-$(CONFIG_SERIAL_8250_AIROHA)      += 8250_en7523.o
- obj-$(CONFIG_SERIAL_OF_PLATFORM)      += 8250_of.o
- CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
---- a/include/uapi/linux/serial_reg.h
-+++ b/include/uapi/linux/serial_reg.h
-@@ -382,5 +382,17 @@
- #define UART_ALTR_EN_TXFIFO_LW        0x01    /* Enable the TX FIFO Low Watermark */
- #define UART_ALTR_TX_LOW      0x41    /* Tx FIFO Low Watermark */
-+/*
-+ * These are definitions for the Airoha EN75XX UART registers.
-+ * Offsets are register indices (the registers are 32 bits wide), not bytes.
-+ */
-+#define UART_BRDL             0
-+#define UART_BRDH             1
-+#define UART_XINCLKDR         10
-+#define UART_XYD              11
-+#define UART_TXLVLCNT         12
-+#define UART_RXLVLCNT         13
-+#define UART_FINTLVL          14
-+
- #endif /* _LINUX_SERIAL_REG_H */
---- a/include/uapi/linux/serial_core.h
-+++ b/include/uapi/linux/serial_core.h
-@@ -45,6 +45,7 @@
- #define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
- #define PORT_RT2880   29      /* Ralink RT2880 internal UART */
- #define PORT_16550A_FSL64 30  /* Freescale 16550 UART with 64 FIFOs */
-+#define PORT_AIROHA    31     /* Airoha 16550 UART */
- /*
-  * ARM specific type numbers.  These are not currently guaranteed
---- a/include/linux/serial_8250.h
-+++ b/include/linux/serial_8250.h
-@@ -195,6 +195,7 @@ void serial8250_do_set_mctrl(struct uart
- void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
-                              unsigned int quot, unsigned int quot_frac);
- int fsl8250_handle_irq(struct uart_port *port);
-+int en7523_set_uart_baud_rate(struct uart_port *port, unsigned int baud);
- int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
- u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
- void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
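For illustration only, not part of the deleted patch: a minimal userspace sketch of the baud-rate arithmetic from en7523_set_uart_baud_rate() above, assuming the 20 MHz crystal, XYD_Y of 65000 and the default divider of 10 (XINCLK_DIVCNT register value 4). For 115200 baud it yields XYD_x = 59904, which reproduces the requested rate exactly.

#include <stdio.h>

int main(void)
{
	unsigned int baud = 115200;
	unsigned int crystal = 20000000;	/* 20 MHz */
	unsigned int clock_div = 10;		/* XINCLK_DIVCNT register value 4 */
	unsigned int xyd_y = 65000;

	/* same integer arithmetic as the driver loop above */
	unsigned int denom = (crystal / 40) / clock_div;	/* 50000 */
	unsigned int nom = baud * (xyd_y / 40);			/* 187200000 */
	unsigned int xyd_x = (nom / denom) << 4;		/* 59904 */

	/* baud_rate = xindiv_clock * (x/y) / ([BRDH,BRDL] * 16), with BRD = 1 */
	double actual = (double)(crystal / clock_div) * xyd_x / xyd_y / 16.0;

	printf("XYD_x = %u, resulting baud = %.0f\n", xyd_x, actual);
	return 0;
}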
diff --git a/target/linux/airoha/patches-6.6/108-pwm-airoha-Add-support-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/108-pwm-airoha-Add-support-for-EN7581-SoC.patch
deleted file mode 100644 (file)
index 0b114d5..0000000
+++ /dev/null
@@ -1,439 +0,0 @@
-From 97e4e7b106b08373f90ff1b8c4daf6c2254386a8 Mon Sep 17 00:00:00 2001
-From: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Date: Wed, 23 Oct 2024 01:20:06 +0200
-Subject: [PATCH] pwm: airoha: Add support for EN7581 SoC
-
-Introduce driver for PWM module available on EN7581 SoC.
-
-Signed-off-by: Benjamin Larsson <benjamin.larsson@genexis.eu>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
----
- drivers/pwm/Kconfig      |  11 ++
- drivers/pwm/Makefile     |   1 +
- drivers/pwm/pwm-airoha.c | 386 +++++++++++++++++++++++++++++++++++++++
- 3 files changed, 398 insertions(+)
- create mode 100644 drivers/pwm/pwm-airoha.c
-
---- a/drivers/pwm/Kconfig
-+++ b/drivers/pwm/Kconfig
-@@ -51,6 +51,17 @@ config PWM_AB8500
-         To compile this driver as a module, choose M here: the module
-         will be called pwm-ab8500.
-+config PWM_AIROHA
-+      tristate "Airoha PWM support"
-+      depends on ARCH_AIROHA || COMPILE_TEST
-+      depends on OF
-+      select REGMAP_MMIO
-+      help
-+        Generic PWM framework driver for Airoha SoC.
-+
-+        To compile this driver as a module, choose M here: the module
-+        will be called pwm-airoha.
-+
- config PWM_APPLE
-       tristate "Apple SoC PWM support"
-       depends on ARCH_APPLE || COMPILE_TEST
---- a/drivers/pwm/Makefile
-+++ b/drivers/pwm/Makefile
-@@ -2,6 +2,7 @@
- obj-$(CONFIG_PWM)             += core.o
- obj-$(CONFIG_PWM_SYSFS)               += sysfs.o
- obj-$(CONFIG_PWM_AB8500)      += pwm-ab8500.o
-+obj-$(CONFIG_PWM_AIROHA)      += pwm-airoha.o
- obj-$(CONFIG_PWM_APPLE)               += pwm-apple.o
- obj-$(CONFIG_PWM_ATMEL)               += pwm-atmel.o
- obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM)     += pwm-atmel-hlcdc.o
---- /dev/null
-+++ b/drivers/pwm/pwm-airoha.c
-@@ -0,0 +1,388 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright 2022 Markus Gothe <markus.gothe@genexis.eu>
-+ *
-+ *  Limitations:
-+ *  - No disable bit, so a disabled PWM is simulated by setting duty_cycle to 0
-+ *  - Only 8 concurrent waveform generators are available for 8 combinations of
-+ *    duty_cycle and period. Waveform generators are shared between 16 GPIO
-+ *    pins and 17 SIPO GPIO pins.
-+ *  - Supports only normal polarity.
-+ *  - On configuration the currently running period is completed.
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/err.h>
-+#include <linux/io.h>
-+#include <linux/iopoll.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/pwm.h>
-+#include <linux/gpio.h>
-+#include <linux/bitops.h>
-+#include <linux/regmap.h>
-+#include <asm/div64.h>
-+
-+#define REG_SGPIO_LED_DATA            0x0024
-+#define SGPIO_LED_DATA_SHIFT_FLAG     BIT(31)
-+#define SGPIO_LED_DATA_DATA           GENMASK(16, 0)
-+
-+#define REG_SGPIO_CLK_DIVR            0x0028
-+#define REG_SGPIO_CLK_DIVR_MASK               GENMASK(1, 0)
-+#define REG_SGPIO_CLK_DLY             0x002c
-+
-+#define REG_SIPO_FLASH_MODE_CFG               0x0030
-+#define SERIAL_GPIO_FLASH_MODE                BIT(1)
-+#define SERIAL_GPIO_MODE_74HC164      BIT(0)
-+
-+#define REG_GPIO_FLASH_PRD_SET(_n)    (0x003c + ((_n) << 2))
-+#define GPIO_FLASH_PRD_MASK(_n)               GENMASK(15 + ((_n) << 4), ((_n) << 4))
-+
-+#define REG_GPIO_FLASH_MAP(_n)                (0x004c + ((_n) << 2))
-+#define GPIO_FLASH_SETID_MASK(_n)     GENMASK(2 + ((_n) << 2), ((_n) << 2))
-+#define GPIO_FLASH_EN(_n)             BIT(3 + ((_n) << 2))
-+
-+#define REG_SIPO_FLASH_MAP(_n)                (0x0054 + ((_n) << 2))
-+
-+#define REG_CYCLE_CFG_VALUE(_n)               (0x0098 + ((_n) << 2))
-+#define WAVE_GEN_CYCLE_MASK(_n)               GENMASK(7 + ((_n) << 3), ((_n) << 3))
-+
-+#define PWM_NUM_BUCKETS                       8
-+
-+struct airoha_pwm_bucket {
-+      /* Bitmask of PWM channels using this bucket */
-+      u64 used;
-+      u64 period_ns;
-+      u64 duty_ns;
-+};
-+
-+struct airoha_pwm {
-+      struct pwm_chip chip;
-+
-+      struct regmap *regmap;
-+
-+      struct device_node *np;
-+      u64 initialized;
-+
-+      struct airoha_pwm_bucket bucket[PWM_NUM_BUCKETS];
-+};
-+
-+/*
-+ * The first 16 GPIO pins, GPIO0-GPIO15, are mapped into 16 PWM channels, 0-15.
-+ * The SIPO GPIO pins are 17 pins which are mapped into 17 PWM channels, 16-32.
-+ * However, we've only got 8 concurrent waveform generators and can therefore
-+ * only use up to 8 different combinations of duty cycle and period at a time.
-+ */
-+#define PWM_NUM_GPIO  16
-+#define PWM_NUM_SIPO  17
-+
-+/* The PWM hardware supports periods between 4 ms and 1 s */
-+#define PERIOD_MIN_NS (4 * NSEC_PER_MSEC)
-+#define PERIOD_MAX_NS (1 * NSEC_PER_SEC)
-+/* It is represented internally as 1/250 s between 1 and 250 */
-+#define PERIOD_MIN    1
-+#define PERIOD_MAX    250
-+/* Duty cycle is relative with 255 corresponding to 100% */
-+#define DUTY_FULL     255
-+
-+static int airoha_pwm_get_generator(struct airoha_pwm *pc, u64 duty_ns,
-+                                  u64 period_ns)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(pc->bucket); i++) {
-+              if (!pc->bucket[i].used)
-+                      continue;
-+
-+              if (duty_ns == pc->bucket[i].duty_ns &&
-+                  period_ns == pc->bucket[i].period_ns)
-+                      return i;
-+
-+              /*
-+               * Unlike duty cycle zero, which can be handled by
-+               * disabling PWM, a generator is needed for full duty
-+               * cycle but it can be reused regardless of period
-+               */
-+              if (duty_ns == DUTY_FULL && pc->bucket[i].duty_ns == DUTY_FULL)
-+                      return i;
-+      }
-+
-+      return -1;
-+}
-+
-+static void airoha_pwm_release_bucket_config(struct airoha_pwm *pc,
-+                                           unsigned int hwpwm)
-+{
-+      int i;
-+
-+      for (i = 0; i < ARRAY_SIZE(pc->bucket); i++)
-+              pc->bucket[i].used &= ~BIT_ULL(hwpwm);
-+}
-+
-+static int airoha_pwm_consume_generator(struct airoha_pwm *pc,
-+                                      u64 duty_ns, u64 period_ns,
-+                                      unsigned int hwpwm)
-+{
-+      int id = airoha_pwm_get_generator(pc, duty_ns, period_ns);
-+
-+      if (id < 0) {
-+              int i;
-+
-+              /* find an unused waveform generator */
-+              for (i = 0; i < ARRAY_SIZE(pc->bucket); i++) {
-+                      if (!(pc->bucket[i].used & ~BIT_ULL(hwpwm))) {
-+                              id = i;
-+                              break;
-+                      }
-+              }
-+      }
-+
-+      if (id >= 0) {
-+              airoha_pwm_release_bucket_config(pc, hwpwm);
-+              pc->bucket[id].used |= BIT_ULL(hwpwm);
-+              pc->bucket[id].period_ns = period_ns;
-+              pc->bucket[id].duty_ns = duty_ns;
-+      }
-+
-+      return id;
-+}
-+
-+static int airoha_pwm_sipo_init(struct airoha_pwm *pc)
-+{
-+      u32 val;
-+
-+      if (!(pc->initialized >> PWM_NUM_GPIO))
-+              return 0;
-+
-+      regmap_clear_bits(pc->regmap, REG_SIPO_FLASH_MODE_CFG,
-+                        SERIAL_GPIO_MODE_74HC164);
-+
-+      /* Configure shift register timings, use 32x divisor */
-+      regmap_write(pc->regmap, REG_SGPIO_CLK_DIVR,
-+                   FIELD_PREP(REG_SGPIO_CLK_DIVR_MASK, 0x3));
-+
-+      /*
-+       * The actual delay is clock + 1.
-+       * Notice that clock delay should not be greater
-+       * than (divisor / 2) - 1.
-+       * Set to 0 by default. (aka 1)
-+       * Set to 0 by default (i.e. an actual delay of 1).
-+      regmap_write(pc->regmap, REG_SGPIO_CLK_DLY, 0x0);
-+
-+      /*
-+       * It is necessary, after muxing, to explicitly shift out all
-+       * zeroes to initialize the shift register before enabling PWM
-+       * mode because in PWM mode SIPO will not start shifting until
-+       * it needs to output a non-zero value (bit 31 of led_data
-+       * indicates shifting in progress and it must return to zero
-+       * before led_data can be written or PWM mode can be set)
-+       */
-+      if (regmap_read_poll_timeout(pc->regmap, REG_SGPIO_LED_DATA, val,
-+                                   !(val & SGPIO_LED_DATA_SHIFT_FLAG), 10,
-+                                   200 * USEC_PER_MSEC))
-+              return -ETIMEDOUT;
-+
-+      regmap_clear_bits(pc->regmap, REG_SGPIO_LED_DATA, SGPIO_LED_DATA_DATA);
-+      if (regmap_read_poll_timeout(pc->regmap, REG_SGPIO_LED_DATA, val,
-+                                   !(val & SGPIO_LED_DATA_SHIFT_FLAG), 10,
-+                                   200 * USEC_PER_MSEC))
-+              return -ETIMEDOUT;
-+
-+      /* Set SIPO in PWM mode */
-+      regmap_set_bits(pc->regmap, REG_SIPO_FLASH_MODE_CFG,
-+                      SERIAL_GPIO_FLASH_MODE);
-+
-+      return 0;
-+}
-+
-+static void airoha_pwm_calc_bucket_config(struct airoha_pwm *pc, int index,
-+                                        u64 duty_ns, u64 period_ns)
-+{
-+      u32 period, duty, mask, val;
-+      u64 tmp;
-+
-+      tmp = duty_ns * DUTY_FULL;
-+      duty = clamp_val(div64_u64(tmp, period_ns), 0, DUTY_FULL);
-+      tmp = period_ns * 25;
-+      period = clamp_val(div64_u64(tmp, 100000000), PERIOD_MIN, PERIOD_MAX);
-+
-+      /* Configure frequency divisor */
-+      mask = WAVE_GEN_CYCLE_MASK(index % 4);
-+      val = (period << __ffs(mask)) & mask;
-+      regmap_update_bits(pc->regmap, REG_CYCLE_CFG_VALUE(index / 4),
-+                         mask, val);
-+
-+      /* Configure duty cycle */
-+      duty = ((DUTY_FULL - duty) << 8) | duty;
-+      mask = GPIO_FLASH_PRD_MASK(index % 2);
-+      val = (duty << __ffs(mask)) & mask;
-+      regmap_update_bits(pc->regmap, REG_GPIO_FLASH_PRD_SET(index / 2),
-+                         mask, val);
-+}
-+
-+static void airoha_pwm_config_flash_map(struct airoha_pwm *pc,
-+                                      unsigned int hwpwm, int index)
-+{
-+      u32 addr, mask, val;
-+
-+      if (hwpwm < PWM_NUM_GPIO) {
-+              addr = REG_GPIO_FLASH_MAP(hwpwm / 8);
-+      } else {
-+              addr = REG_SIPO_FLASH_MAP(hwpwm / 8);
-+              hwpwm -= PWM_NUM_GPIO;
-+      }
-+
-+      if (index < 0) {
-+              /*
-+               * Change of waveform takes effect immediately but
-+               * disabling has some delay so to prevent glitching
-+               * only the enable bit is touched when disabling
-+               */
-+              regmap_clear_bits(pc->regmap, addr, GPIO_FLASH_EN(hwpwm % 8));
-+              return;
-+      }
-+
-+      mask = GPIO_FLASH_SETID_MASK(hwpwm % 8);
-+      val = ((index & 7) << __ffs(mask)) & mask;
-+      regmap_update_bits(pc->regmap, addr, mask, val);
-+      regmap_set_bits(pc->regmap, addr, GPIO_FLASH_EN(hwpwm % 8));
-+}
-+
-+static int airoha_pwm_config(struct airoha_pwm *pc, struct pwm_device *pwm,
-+                           u64 duty_ns, u64 period_ns)
-+{
-+      int index = -1;
-+
-+      index = airoha_pwm_consume_generator(pc, duty_ns, period_ns,
-+                                           pwm->hwpwm);
-+      if (index < 0)
-+              return -EBUSY;
-+
-+      if (!(pc->initialized & BIT_ULL(pwm->hwpwm)) &&
-+          pwm->hwpwm >= PWM_NUM_GPIO)
-+              airoha_pwm_sipo_init(pc);
-+
-+      if (index >= 0) {
-+              airoha_pwm_calc_bucket_config(pc, index, duty_ns, period_ns);
-+              airoha_pwm_config_flash_map(pc, pwm->hwpwm, index);
-+      } else {
-+              airoha_pwm_config_flash_map(pc, pwm->hwpwm, index);
-+              airoha_pwm_release_bucket_config(pc, pwm->hwpwm);
-+      }
-+
-+      pc->initialized |= BIT_ULL(pwm->hwpwm);
-+
-+      return 0;
-+}
-+
-+static void airoha_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-+{
-+      struct airoha_pwm *pc = container_of(chip, struct airoha_pwm, chip);
-+
-+      /* Disable PWM and release the waveform */
-+      airoha_pwm_config_flash_map(pc, pwm->hwpwm, -1);
-+      airoha_pwm_release_bucket_config(pc, pwm->hwpwm);
-+
-+      pc->initialized &= ~BIT_ULL(pwm->hwpwm);
-+      if (!(pc->initialized >> PWM_NUM_GPIO))
-+              regmap_clear_bits(pc->regmap, REG_SIPO_FLASH_MODE_CFG,
-+                                SERIAL_GPIO_FLASH_MODE);
-+}
-+
-+static int airoha_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
-+                          const struct pwm_state *state)
-+{
-+      struct airoha_pwm *pc = container_of(chip, struct airoha_pwm, chip);
-+      u64 duty = state->enabled ? state->duty_cycle : 0;
-+      u64 period = state->period;
-+
-+      /* Only normal polarity is supported */
-+      if (state->polarity == PWM_POLARITY_INVERSED)
-+              return -EINVAL;
-+
-+      if (!state->enabled) {
-+              airoha_pwm_disable(chip, pwm);
-+              return 0;
-+      }
-+
-+      if (period < PERIOD_MIN_NS)
-+              return -EINVAL;
-+
-+      if (period > PERIOD_MAX_NS)
-+              period = PERIOD_MAX_NS;
-+
-+      return airoha_pwm_config(pc, pwm, duty, period);
-+}
-+
-+static int airoha_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
-+                              struct pwm_state *state)
-+{
-+      struct airoha_pwm *pc = container_of(chip, struct airoha_pwm, chip);
-+      int i;
-+
-+      /* find hwpwm in waveform generator bucket */
-+      for (i = 0; i < ARRAY_SIZE(pc->bucket); i++) {
-+              if (pc->bucket[i].used & BIT_ULL(pwm->hwpwm)) {
-+                      state->enabled = pc->initialized & BIT_ULL(pwm->hwpwm);
-+                      state->polarity = PWM_POLARITY_NORMAL;
-+                      state->period = pc->bucket[i].period_ns;
-+                      state->duty_cycle = pc->bucket[i].duty_ns;
-+                      break;
-+              }
-+      }
-+
-+      if (i == ARRAY_SIZE(pc->bucket))
-+              state->enabled = false;
-+
-+      return 0;
-+}
-+
-+static const struct pwm_ops airoha_pwm_ops = {
-+      .get_state = airoha_pwm_get_state,
-+      .apply = airoha_pwm_apply,
-+      .owner = THIS_MODULE,
-+};
-+
-+static int airoha_pwm_probe(struct platform_device *pdev)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct airoha_pwm *pc;
-+
-+      pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
-+      if (!pc)
-+              return -ENOMEM;
-+
-+      pc->np = dev->of_node;
-+      pc->chip.dev = dev;
-+      pc->chip.ops = &airoha_pwm_ops;
-+      pc->chip.npwm = PWM_NUM_GPIO + PWM_NUM_SIPO;
-+
-+      pc->regmap = device_node_to_regmap(dev->parent->of_node);
-+      if (IS_ERR(pc->regmap))
-+              return PTR_ERR(pc->regmap);
-+
-+      return devm_pwmchip_add(&pdev->dev, &pc->chip);
-+}
-+
-+static const struct of_device_id airoha_pwm_of_match[] = {
-+      { .compatible = "airoha,en7581-pwm" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, airoha_pwm_of_match);
-+
-+static struct platform_driver airoha_pwm_driver = {
-+      .driver = {
-+              .name = "pwm-airoha",
-+              .of_match_table = airoha_pwm_of_match,
-+      },
-+      .probe = airoha_pwm_probe,
-+};
-+module_platform_driver(airoha_pwm_driver);
-+
-+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-+MODULE_AUTHOR("Markus Gothe <markus.gothe@genexis.eu>");
-+MODULE_AUTHOR("Benjamin Larsson <benjamin.larsson@genexis.eu>");
-+MODULE_DESCRIPTION("Airoha EN7581 PWM driver");
-+MODULE_LICENSE("GPL");
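For illustration only, not part of the deleted patch: the quantization done by airoha_pwm_calc_bucket_config() above maps the requested period into 4 ms steps (1..250) and the duty cycle into 1/255 steps. A hedged standalone sketch of the same arithmetic for a 100 ms period with a 25 ms high time:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_ns = 100000000ULL;	/* 100 ms requested period */
	uint64_t duty_ns = 25000000ULL;		/* 25 ms requested high time */

	/* mirrors the driver: duty in 1/255 steps, period in 4 ms units */
	uint32_t duty = (uint32_t)(duty_ns * 255 / period_ns);		/* 63 (~24.7%) */
	uint32_t period = (uint32_t)(period_ns * 25 / 100000000ULL);	/* 25 -> 25 x 4 ms */

	printf("duty = %u/255, period = %u/250\n", duty, period);
	return 0;
}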
diff --git a/target/linux/airoha/patches-6.6/200-spinlock-extend-guard-with-spinlock_bh-variants.patch b/target/linux/airoha/patches-6.6/200-spinlock-extend-guard-with-spinlock_bh-variants.patch
deleted file mode 100644 (file)
index d4905d5..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-From 38d2c6aafc5bbcad3ec36f6d3356b3debd40f6fd Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 16 Oct 2024 20:26:05 +0200
-Subject: [RFC PATCH v2 1/3] spinlock: extend guard with spinlock_bh variants
-
-Extend guard APIs with missing raw/spinlock_bh variants.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- include/linux/spinlock.h | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
-
---- a/include/linux/spinlock.h
-+++ b/include/linux/spinlock.h
-@@ -519,6 +519,10 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, ra
- DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
-+DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
-+                  raw_spin_lock_bh(_T->lock),
-+                  raw_spin_unlock_bh(_T->lock))
-+
- DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
-                   raw_spin_lock_irqsave(_T->lock, _T->flags),
-                   raw_spin_unlock_irqrestore(_T->lock, _T->flags),
-@@ -540,6 +544,10 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlo
- DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
-                        spin_trylock_irq(_T->lock))
-+DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
-+                  spin_lock_bh(_T->lock),
-+                  spin_unlock_bh(_T->lock))
-+
- DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
-                   spin_lock_irqsave(_T->lock, _T->flags),
-                   spin_unlock_irqrestore(_T->lock, _T->flags),
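For illustration only, not part of the deleted patch: once the spinlock_bh guard class above exists, a manual spin_lock_bh()/spin_unlock_bh() pair can be replaced by a scope-based guard. A hedged sketch with a hypothetical counter helper:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

/* Sketch only: the lock is dropped (and BHs re-enabled) when the scope ends */
static int example_bump_counter(spinlock_t *lock, int *counter)
{
	guard(spinlock_bh)(lock);
	return ++(*counter);
}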
diff --git a/target/linux/airoha/patches-6.6/201-crypto-Add-Mediatek-EIP-93-crypto-engine-support.patch b/target/linux/airoha/patches-6.6/201-crypto-Add-Mediatek-EIP-93-crypto-engine-support.patch
deleted file mode 100644 (file)
index 1cb5812..0000000
+++ /dev/null
@@ -1,4206 +0,0 @@
-From 45260ebcfb17a47bbad37055024dad50f2fcc5d0 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 27 Oct 2021 17:13:29 +0800
-Subject: [RFC PATCH v2 3/3] crypto: Add Mediatek EIP-93 crypto engine support
-
-Add support for the Mediatek EIP-93 crypto engine used on MT7621 and newer
-Airoha SoCs.
-
-EIP-93 IP supports AES/DES/3DES ciphers in ECB/CBC and CTR modes as well as
-authenc(HMAC(x), cipher(y)) using HMAC MD5, SHA1, SHA224 and SHA256.
-
-The EIP-93 provides registers that signal support for specific ciphers, and
-the driver dynamically registers only the algorithms supported by the chip.
-
-Signed-off-by: Richard van Schagen <vschagen@icloud.com>
-Co-developed-by: Christian Marangi <ansuelsmth@gmail.com>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
-Changes v2:
-- Rename all variables from mtk to eip93
-- Move to inside-secure directory
-- Check DMA map errors
-- Use guard API for spinlock
-- Minor improvements to code
-
- drivers/crypto/Kconfig                        |   1 +
- drivers/crypto/Makefile                       |   1 +
- drivers/crypto/inside-secure/eip93/Kconfig    |  20 +
- drivers/crypto/inside-secure/eip93/Makefile   |   5 +
- .../crypto/inside-secure/eip93/eip93-aead.c   | 702 ++++++++++++++
- .../crypto/inside-secure/eip93/eip93-aead.h   |  38 +
- .../crypto/inside-secure/eip93/eip93-aes.h    |  16 +
- .../crypto/inside-secure/eip93/eip93-cipher.c | 407 ++++++++
- .../crypto/inside-secure/eip93/eip93-cipher.h |  60 ++
- .../crypto/inside-secure/eip93/eip93-common.c | 824 ++++++++++++++++
- .../crypto/inside-secure/eip93/eip93-common.h |  25 +
- .../crypto/inside-secure/eip93/eip93-des.h    |  16 +
- .../crypto/inside-secure/eip93/eip93-hash.c   | 909 ++++++++++++++++++
- .../crypto/inside-secure/eip93/eip93-hash.h   |  72 ++
- .../crypto/inside-secure/eip93/eip93-main.c   | 502 ++++++++++
- .../crypto/inside-secure/eip93/eip93-main.h   | 155 +++
- .../crypto/inside-secure/eip93/eip93-regs.h   | 335 +++++++
- 17 files changed, 4088 insertions(+)
- create mode 100644 drivers/crypto/inside-secure/eip93/Kconfig
- create mode 100644 drivers/crypto/inside-secure/eip93/Makefile
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aead.c
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aead.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aes.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-cipher.c
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-cipher.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-common.c
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-common.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-des.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-hash.c
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-hash.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-main.c
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-main.h
- create mode 100644 drivers/crypto/inside-secure/eip93/eip93-regs.h
-
---- a/drivers/crypto/Kconfig
-+++ b/drivers/crypto/Kconfig
-@@ -796,5 +796,6 @@ config CRYPTO_DEV_SA2UL
- source "drivers/crypto/aspeed/Kconfig"
- source "drivers/crypto/starfive/Kconfig"
-+source "drivers/crypto/inside-secure/eip93/Kconfig"
- endif # CRYPTO_HW
---- a/drivers/crypto/Makefile
-+++ b/drivers/crypto/Makefile
-@@ -51,3 +51,4 @@ obj-y += hisilicon/
- obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
- obj-y += intel/
- obj-y += starfive/
-+obj-y += inside-secure/eip93/
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/Kconfig
-@@ -0,0 +1,20 @@
-+# SPDX-License-Identifier: GPL-2.0
-+config CRYPTO_DEV_EIP93
-+      tristate "Support for EIP93 crypto HW accelerators"
-+      depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST
-+      select CRYPTO_LIB_AES
-+      select CRYPTO_LIB_DES
-+      select CRYPTO_SKCIPHER
-+      select CRYPTO_AEAD
-+      select CRYPTO_AUTHENC
-+      select CRYPTO_MD5
-+      select CRYPTO_SHA1
-+      select CRYPTO_SHA256
-+      help
-+        The EIP93 provides various crypto HW accelerators. Select this if
-+        you want to use the EIP93 modules for any of the crypto algorithms.
-+
-+        If the IP supports it, this provides offload for AES in ECB, CBC and
-+        CTR modes. It also provides DES and 3DES in ECB and CBC modes.
-+
-+        It also provides AEAD authenc(hmac(x), cipher(y)) for the supported
-+        algorithms.
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/Makefile
-@@ -0,0 +1,5 @@
-+obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o
-+
-+crypto-hw-eip93-y += eip93-main.o eip93-common.o
-+crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o
-+crypto-hw-eip93-y += eip93-hash.o
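For illustration only, not part of the deleted patch: once this driver registers its templates, kernel users reach the hardware through the normal crypto API by algorithm name. A hedged sketch requesting one of the authenc combinations listed in the commit message, with a hypothetical function name:

#include <crypto/aead.h>
#include <linux/err.h>

/* Sketch only: allocate an AEAD transform; the eip93 implementation is
 * selected when it is registered and has the highest priority. */
static struct crypto_aead *example_get_authenc(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return NULL;

	return tfm;
}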
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
-@@ -0,0 +1,702 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#include <crypto/aead.h>
-+#include <crypto/aes.h>
-+#include <crypto/authenc.h>
-+#include <crypto/ctr.h>
-+#include <crypto/hmac.h>
-+#include <crypto/internal/aead.h>
-+#include <crypto/md5.h>
-+#include <crypto/null.h>
-+#include <crypto/sha1.h>
-+#include <crypto/sha2.h>
-+
-+#include <crypto/internal/des.h>
-+
-+#include <linux/crypto.h>
-+#include <linux/dma-mapping.h>
-+
-+#include "eip93-aead.h"
-+#include "eip93-cipher.h"
-+#include "eip93-common.h"
-+#include "eip93-regs.h"
-+
-+void eip93_aead_handle_result(struct crypto_async_request *async, int err)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
-+      struct eip93_device *mtk = ctx->mtk;
-+      struct aead_request *req = aead_request_cast(async);
-+      struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
-+
-+      eip93_unmap_dma(mtk, rctx, req->src, req->dst);
-+      eip93_handle_result(mtk, rctx, req->iv);
-+
-+      aead_request_complete(req, err);
-+}
-+
-+static int eip93_aead_send_req(struct crypto_async_request *async)
-+{
-+      struct aead_request *req = aead_request_cast(async);
-+      struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
-+      int err;
-+
-+      err = check_valid_request(rctx);
-+      if (err) {
-+              aead_request_complete(req, err);
-+              return err;
-+      }
-+
-+      return eip93_send_req(async, req->iv, rctx);
-+}
-+
-+/* Crypto aead API functions */
-+static int eip93_aead_cra_init(struct crypto_tfm *tfm)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
-+                              struct eip93_alg_template, alg.aead.base);
-+
-+      crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-+                              sizeof(struct eip93_cipher_reqctx));
-+
-+      ctx->mtk = tmpl->mtk;
-+      ctx->flags = tmpl->flags;
-+      ctx->type = tmpl->type;
-+      ctx->set_assoc = true;
-+
-+      ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
-+      if (!ctx->sa_record)
-+              return -ENOMEM;
-+
-+      return 0;
-+}
-+
-+static void eip93_aead_cra_exit(struct crypto_tfm *tfm)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+      dma_unmap_single(ctx->mtk->dev, ctx->sa_record_base,
-+                       sizeof(*ctx->sa_record), DMA_TO_DEVICE);
-+      kfree(ctx->sa_record);
-+}
-+
-+static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
-+                           unsigned int len)
-+{
-+      struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct crypto_authenc_keys keys;
-+      struct crypto_aes_ctx aes;
-+      struct sa_record *sa_record = ctx->sa_record;
-+      u32 nonce = 0;
-+      int ret;
-+
-+      if (crypto_authenc_extractkeys(&keys, key, len))
-+              return -EINVAL;
-+
-+      if (IS_RFC3686(ctx->flags)) {
-+              if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
-+                      return -EINVAL;
-+
-+              keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
-+              memcpy(&nonce, keys.enckey + keys.enckeylen,
-+                     CTR_RFC3686_NONCE_SIZE);
-+      }
-+
-+      switch ((ctx->flags & EIP93_ALG_MASK)) {
-+      case EIP93_ALG_DES:
-+              ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
-+              break;
-+      case EIP93_ALG_3DES:
-+              if (keys.enckeylen != DES3_EDE_KEY_SIZE)
-+                      return -EINVAL;
-+
-+              ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
-+              break;
-+      case EIP93_ALG_AES:
-+              ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
-+      }
-+      if (ret)
-+              return ret;
-+
-+      ctx->blksize = crypto_aead_blocksize(ctfm);
-+      /* Encryption key */
-+      eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags);
-+      sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
-+      sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
-+                                            EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH);
-+      sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
-+      sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
-+                                            ctx->authsize / sizeof(u32));
-+
-+      memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen);
-+      ctx->sa_nonce = nonce;
-+      sa_record->sa_nonce = nonce;
-+
-+      /* authentication key */
-+      ret = eip93_authenc_setkey(ctfm, sa_record, keys.authkey,
-+                                 keys.authkeylen);
-+
-+      ctx->set_assoc = true;
-+
-+      return ret;
-+}
-+
-+static int eip93_aead_setauthsize(struct crypto_aead *ctfm,
-+                                unsigned int authsize)
-+{
-+      struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+      ctx->authsize = authsize;
-+      ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
-+      ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
-+                                                 ctx->authsize / sizeof(u32));
-+
-+      return 0;
-+}
-+
-+static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx,
-+                              struct aead_request *req)
-+{
-+      struct sa_record *sa_record = ctx->sa_record;
-+
-+      sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET;
-+      sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET,
-+                                            req->assoclen / sizeof(u32));
-+
-+      ctx->assoclen = req->assoclen;
-+}
-+
-+static int eip93_aead_crypt(struct aead_request *req)
-+{
-+      struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
-+      struct crypto_async_request *async = &req->base;
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-+      struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+      int ret;
-+
-+      ctx->sa_record_base = dma_map_single(ctx->mtk->dev, ctx->sa_record,
-+                                           sizeof(*ctx->sa_record), DMA_TO_DEVICE);
-+      ret = dma_mapping_error(ctx->mtk->dev, ctx->sa_record_base);
-+      if (ret)
-+              return ret;
-+
-+      rctx->textsize = req->cryptlen;
-+      rctx->blksize = ctx->blksize;
-+      rctx->assoclen = req->assoclen;
-+      rctx->authsize = ctx->authsize;
-+      rctx->sg_src = req->src;
-+      rctx->sg_dst = req->dst;
-+      rctx->ivsize = crypto_aead_ivsize(aead);
-+      rctx->desc_flags = EIP93_DESC_AEAD;
-+      rctx->sa_record_base = ctx->sa_record_base;
-+
-+      if (IS_DECRYPT(rctx->flags))
-+              rctx->textsize -= rctx->authsize;
-+
-+      return eip93_aead_send_req(async);
-+}
-+
-+static int eip93_aead_encrypt(struct aead_request *req)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-+      struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
-+
-+      rctx->flags = ctx->flags;
-+      rctx->flags |= EIP93_ENCRYPT;
-+      if (ctx->set_assoc) {
-+              eip93_aead_setassoc(ctx, req);
-+              ctx->set_assoc = false;
-+      }
-+
-+      if (req->assoclen != ctx->assoclen) {
-+              dev_err(ctx->mtk->dev, "Request AAD length error\n");
-+              return -EINVAL;
-+      }
-+
-+      return eip93_aead_crypt(req);
-+}
-+
-+static int eip93_aead_decrypt(struct aead_request *req)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-+      struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
-+
-+      ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
-+      ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD |
-+                                        EIP93_SA_CMD_COPY_DIGEST);
-+
-+      rctx->flags = ctx->flags;
-+      rctx->flags |= EIP93_DECRYPT;
-+      if (ctx->set_assoc) {
-+              eip93_aead_setassoc(ctx, req);
-+              ctx->set_assoc = false;
-+      }
-+
-+      if (req->assoclen != ctx->assoclen) {
-+              dev_err(ctx->mtk->dev, "Request AAD length error\n");
-+              return -EINVAL;
-+      }
-+
-+      return eip93_aead_crypt(req);
-+}
-+
-+/* Available authenc algorithms in this module */
-+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = MD5_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(md5),cbc(aes))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(md5-eip93), cbc(aes-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA1_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha1),cbc(aes))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha1-eip93),cbc(aes-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA224_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha224),cbc(aes))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha224-eip93),cbc(aes-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA256_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha256),cbc(aes))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha256-eip93),cbc(aes-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 |
-+                      EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = CTR_RFC3686_IV_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = MD5_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 |
-+                      EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = CTR_RFC3686_IV_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA1_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 |
-+                      EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = CTR_RFC3686_IV_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA224_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 |
-+                      EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = CTR_RFC3686_IV_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA256_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = MD5_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(md5),cbc(des))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(md5-eip93),cbc(des-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA1_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha1),cbc(des))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha1-eip93),cbc(des-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA224_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha224),cbc(des))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha224-eip93),cbc(des-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA256_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha256),cbc(des))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha256-eip93),cbc(des-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES3_EDE_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = MD5_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0x0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES3_EDE_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA1_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-+                      .cra_driver_name =
-+                              "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0x0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES3_EDE_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA224_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0x0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = {
-+      .type = EIP93_ALG_TYPE_AEAD,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES,
-+      .alg.aead = {
-+              .setkey = eip93_aead_setkey,
-+              .encrypt = eip93_aead_encrypt,
-+              .decrypt = eip93_aead_decrypt,
-+              .ivsize = DES3_EDE_BLOCK_SIZE,
-+              .setauthsize = eip93_aead_setauthsize,
-+              .maxauthsize = SHA256_DIGEST_SIZE,
-+              .base = {
-+                      .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-+                      .cra_driver_name =
-+                      "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                      CRYPTO_ALG_ALLOCATES_MEMORY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0x0,
-+                      .cra_init = eip93_aead_cra_init,
-+                      .cra_exit = eip93_aead_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
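For illustration only (this is not part of the patch): once registered, the authenc() templates above are requested through the generic kernel AEAD API by algorithm name. A minimal allocation sketch follows, assuming the eip93 instance wins on priority; the helper name is a placeholder and using the full HMAC-SHA256 digest as the tag is just one valid authsize choice.

#include <crypto/aead.h>
#include <crypto/sha2.h>
#include <linux/err.h>

/* Illustrative only: allocate the hardware-backed authenc AEAD and
 * select the full HMAC-SHA256 digest as the authentication tag.
 */
static struct crypto_aead *example_get_authenc_sha256_cbc_aes(void)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
	if (ret) {
		crypto_free_aead(tfm);
		return ERR_PTR(ret);
	}

	return tfm;
}

The authenc() setkey blob (an rtattr carrying the cipher key length, followed by the authentication key and the cipher key) is normally built by callers such as the IPsec stack and is omitted here.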
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h
-@@ -0,0 +1,38 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_AEAD_H_
-+#define _EIP93_AEAD_H_
-+
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null;
-+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null;
-+
-+void eip93_aead_handle_result(struct crypto_async_request *async, int err);
-+
-+#endif /* _EIP93_AEAD_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h
-@@ -0,0 +1,16 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_AES_H_
-+#define _EIP93_AES_H_
-+
-+extern struct eip93_alg_template eip93_alg_ecb_aes;
-+extern struct eip93_alg_template eip93_alg_cbc_aes;
-+extern struct eip93_alg_template eip93_alg_ctr_aes;
-+extern struct eip93_alg_template eip93_alg_rfc3686_aes;
-+
-+#endif /* _EIP93_AES_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
-@@ -0,0 +1,407 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#include <crypto/aes.h>
-+#include <crypto/ctr.h>
-+#include <crypto/internal/des.h>
-+#include <linux/dma-mapping.h>
-+
-+#include "eip93-cipher.h"
-+#include "eip93-common.h"
-+#include "eip93-regs.h"
-+
-+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
-+      struct eip93_device *mtk = ctx->mtk;
-+      struct skcipher_request *req = skcipher_request_cast(async);
-+      struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
-+
-+      eip93_unmap_dma(mtk, rctx, req->src, req->dst);
-+      eip93_handle_result(mtk, rctx, req->iv);
-+
-+      skcipher_request_complete(req, err);
-+}
-+
-+static int eip93_skcipher_send_req(struct crypto_async_request *async)
-+{
-+      struct skcipher_request *req = skcipher_request_cast(async);
-+      struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
-+      int err;
-+
-+      err = check_valid_request(rctx);
-+
-+      if (err) {
-+              skcipher_request_complete(req, err);
-+              return err;
-+      }
-+
-+      return eip93_send_req(async, req->iv, rctx);
-+}
-+
-+/* Crypto skcipher API functions */
-+static int eip93_skcipher_cra_init(struct crypto_tfm *tfm)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
-+                              struct eip93_alg_template, alg.skcipher.base);
-+
-+      crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
-+                                  sizeof(struct eip93_cipher_reqctx));
-+
-+      memset(ctx, 0, sizeof(*ctx));
-+
-+      ctx->mtk = tmpl->mtk;
-+      ctx->type = tmpl->type;
-+
-+      ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
-+      if (!ctx->sa_record)
-+              return -ENOMEM;
-+
-+      return 0;
-+}
-+
-+static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+      dma_unmap_single(ctx->mtk->dev, ctx->sa_record_base,
-+                       sizeof(*ctx->sa_record), DMA_TO_DEVICE);
-+      kfree(ctx->sa_record);
-+}
-+
-+static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key,
-+                               unsigned int len)
-+{
-+      struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
-+                                                   struct eip93_alg_template,
-+                                                   alg.skcipher.base);
-+      struct sa_record *sa_record = ctx->sa_record;
-+      unsigned int keylen = len;
-+      u32 flags = tmpl->flags;
-+      u32 nonce = 0;
-+      int ret = 0;
-+
-+      if (!key || !keylen)
-+              return -EINVAL;
-+
-+      if (IS_RFC3686(flags)) {
-+              if (len < CTR_RFC3686_NONCE_SIZE)
-+                      return -EINVAL;
-+
-+              keylen = len - CTR_RFC3686_NONCE_SIZE;
-+              memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);
-+      }
-+
-+      if (flags & EIP93_ALG_DES) {
-+              ctx->blksize = DES_BLOCK_SIZE;
-+              ret = verify_skcipher_des_key(ctfm, key);
-+      }
-+      if (flags & EIP93_ALG_3DES) {
-+              ctx->blksize = DES3_EDE_BLOCK_SIZE;
-+              ret = verify_skcipher_des3_key(ctfm, key);
-+      }
-+
-+      if (flags & EIP93_ALG_AES) {
-+              struct crypto_aes_ctx aes;
-+
-+              ctx->blksize = AES_BLOCK_SIZE;
-+              ret = aes_expandkey(&aes, key, keylen);
-+      }
-+      if (ret)
-+              return ret;
-+
-+      eip93_set_sa_record(sa_record, keylen, flags);
-+
-+      memcpy(sa_record->sa_key, key, keylen);
-+      ctx->sa_nonce = nonce;
-+      sa_record->sa_nonce = nonce;
-+
-+      return 0;
-+}
-+
-+static int eip93_skcipher_crypt(struct skcipher_request *req)
-+{
-+      struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
-+      struct crypto_async_request *async = &req->base;
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-+      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+      int ret;
-+
-+      if (!req->cryptlen)
-+              return 0;
-+
-+      /*
-+       * ECB and CBC algorithms require message lengths to be
-+       * multiples of block size.
-+       */
-+      if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
-+              if (!IS_ALIGNED(req->cryptlen,
-+                              crypto_skcipher_blocksize(skcipher)))
-+                      return -EINVAL;
-+
-+      ctx->sa_record_base = dma_map_single(ctx->mtk->dev, ctx->sa_record,
-+                                           sizeof(*ctx->sa_record), DMA_TO_DEVICE);
-+      ret = dma_mapping_error(ctx->mtk->dev, ctx->sa_record_base);
-+      if (ret)
-+              return ret;
-+
-+      rctx->assoclen = 0;
-+      rctx->textsize = req->cryptlen;
-+      rctx->authsize = 0;
-+      rctx->sg_src = req->src;
-+      rctx->sg_dst = req->dst;
-+      rctx->ivsize = crypto_skcipher_ivsize(skcipher);
-+      rctx->blksize = ctx->blksize;
-+      rctx->desc_flags = EIP93_DESC_SKCIPHER;
-+      rctx->sa_record_base = ctx->sa_record_base;
-+
-+      return eip93_skcipher_send_req(async);
-+}
-+
-+static int eip93_skcipher_encrypt(struct skcipher_request *req)
-+{
-+      struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
-+      struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
-+                              struct eip93_alg_template, alg.skcipher.base);
-+
-+      rctx->flags = tmpl->flags;
-+      rctx->flags |= EIP93_ENCRYPT;
-+
-+      return eip93_skcipher_crypt(req);
-+}
-+
-+static int eip93_skcipher_decrypt(struct skcipher_request *req)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-+      struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
-+      struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
-+                              struct eip93_alg_template, alg.skcipher.base);
-+
-+      ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
-+
-+      rctx->flags = tmpl->flags;
-+      rctx->flags |= EIP93_DECRYPT;
-+
-+      return eip93_skcipher_crypt(req);
-+}
-+
-+/* Available algorithms in this module */
-+struct eip93_alg_template eip93_alg_ecb_aes = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_ECB | EIP93_ALG_AES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = AES_MIN_KEY_SIZE,
-+              .max_keysize = AES_MAX_KEY_SIZE,
-+              .ivsize = 0,
-+              .base = {
-+                      .cra_name = "ecb(aes)",
-+                      .cra_driver_name = "ecb(aes-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_NEED_FALLBACK |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0xf,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_cbc_aes = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_CBC | EIP93_ALG_AES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = AES_MIN_KEY_SIZE,
-+              .max_keysize = AES_MAX_KEY_SIZE,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .base = {
-+                      .cra_name = "cbc(aes)",
-+                      .cra_driver_name = "cbc(aes-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_NEED_FALLBACK |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = AES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0xf,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_ctr_aes = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_CTR | EIP93_ALG_AES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = AES_MIN_KEY_SIZE,
-+              .max_keysize = AES_MAX_KEY_SIZE,
-+              .ivsize = AES_BLOCK_SIZE,
-+              .base = {
-+                      .cra_name = "ctr(aes)",
-+                      .cra_driver_name = "ctr(aes-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                   CRYPTO_ALG_NEED_FALLBACK |
-+                                   CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0xf,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_rfc3686_aes = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
-+              .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
-+              .ivsize = CTR_RFC3686_IV_SIZE,
-+              .base = {
-+                      .cra_name = "rfc3686(ctr(aes))",
-+                      .cra_driver_name = "rfc3686(ctr(aes-eip93))",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_NEED_FALLBACK |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = 1,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0xf,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_ecb_des = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_ECB | EIP93_ALG_DES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = DES_KEY_SIZE,
-+              .max_keysize = DES_KEY_SIZE,
-+              .ivsize = 0,
-+              .base = {
-+                      .cra_name = "ecb(des)",
-+                      .cra_driver_name = "ecb(des-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_cbc_des = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_CBC | EIP93_ALG_DES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = DES_KEY_SIZE,
-+              .max_keysize = DES_KEY_SIZE,
-+              .ivsize = DES_BLOCK_SIZE,
-+              .base = {
-+                      .cra_name = "cbc(des)",
-+                      .cra_driver_name = "cbc(des-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = DES_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_ecb_des3_ede = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_ECB | EIP93_ALG_3DES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = DES3_EDE_KEY_SIZE,
-+              .max_keysize = DES3_EDE_KEY_SIZE,
-+              .ivsize = 0,
-+              .base = {
-+                      .cra_name = "ecb(des3_ede)",
-+                      .cra_driver_name = "ecb(des3_ede-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_cbc_des3_ede = {
-+      .type = EIP93_ALG_TYPE_SKCIPHER,
-+      .flags = EIP93_MODE_CBC | EIP93_ALG_3DES,
-+      .alg.skcipher = {
-+              .setkey = eip93_skcipher_setkey,
-+              .encrypt = eip93_skcipher_encrypt,
-+              .decrypt = eip93_skcipher_decrypt,
-+              .min_keysize = DES3_EDE_KEY_SIZE,
-+              .max_keysize = DES3_EDE_KEY_SIZE,
-+              .ivsize = DES3_EDE_BLOCK_SIZE,
-+              .base = {
-+                      .cra_name = "cbc(des3_ede)",
-+                      .cra_driver_name = "cbc(des3_ede-eip93)",
-+                      .cra_priority = EIP93_CRA_PRIORITY,
-+                      .cra_flags = CRYPTO_ALG_ASYNC |
-+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
-+                      .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+                      .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
-+                      .cra_alignmask = 0,
-+                      .cra_init = eip93_skcipher_cra_init,
-+                      .cra_exit = eip93_skcipher_cra_exit,
-+                      .cra_module = THIS_MODULE,
-+              },
-+      },
-+};
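For context only (not part of the patch): the skciphers registered above are driven through the kernel's asynchronous crypto API. A minimal sketch of one in-place cbc(aes) encryption is shown below; the function name, the 128-bit key size and the single-buffer handling are illustrative assumptions, and the buffer length must be a multiple of AES_BLOCK_SIZE.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative helper: one in-place CBC-AES encryption, waiting for
 * the asynchronous driver to complete the request.
 */
static int example_cbc_aes_encrypt(const u8 *key, u8 *iv, u8 *buf,
				   unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to "cbc(aes-eip93)" when it has the highest priority */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_req;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free_req:
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}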
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
-@@ -0,0 +1,60 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_CIPHER_H_
-+#define _EIP93_CIPHER_H_
-+
-+#include "eip93-main.h"
-+
-+struct eip93_crypto_ctx {
-+      struct eip93_device             *mtk;
-+      u32                             flags;
-+      struct sa_record                *sa_record;
-+      u32                             sa_nonce;
-+      int                             blksize;
-+      dma_addr_t                      sa_record_base;
-+      /* AEAD specific */
-+      unsigned int                    authsize;
-+      unsigned int                    assoclen;
-+      bool                            set_assoc;
-+      enum eip93_alg_type             type;
-+};
-+
-+struct eip93_cipher_reqctx {
-+      u16                             desc_flags;
-+      u16                             flags;
-+      unsigned int                    blksize;
-+      unsigned int                    ivsize;
-+      unsigned int                    textsize;
-+      unsigned int                    assoclen;
-+      unsigned int                    authsize;
-+      dma_addr_t                      sa_record_base;
-+      struct sa_state                 *sa_state;
-+      dma_addr_t                      sa_state_base;
-+      struct eip93_descriptor         *cdesc;
-+      struct scatterlist              *sg_src;
-+      struct scatterlist              *sg_dst;
-+      int                             src_nents;
-+      int                             dst_nents;
-+      struct sa_state                 *sa_state_ctr;
-+      dma_addr_t                      sa_state_ctr_base;
-+};
-+
-+int check_valid_request(struct eip93_cipher_reqctx *rctx);
-+
-+void eip93_unmap_dma(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
-+                   struct scatterlist *reqsrc, struct scatterlist *reqdst);
-+
-+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err);
-+
-+int eip93_send_req(struct crypto_async_request *async,
-+                 const u8 *reqiv, struct eip93_cipher_reqctx *rctx);
-+
-+void eip93_handle_result(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
-+                       u8 *reqiv);
-+
-+#endif /* _EIP93_CIPHER_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c
-@@ -0,0 +1,824 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#include <crypto/aes.h>
-+#include <crypto/ctr.h>
-+#include <crypto/hmac.h>
-+#include <crypto/sha1.h>
-+#include <crypto/sha2.h>
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/scatterlist.h>
-+
-+#include "eip93-cipher.h"
-+#include "eip93-hash.h"
-+#include "eip93-common.h"
-+#include "eip93-main.h"
-+#include "eip93-regs.h"
-+
-+int eip93_parse_ctrl_stat_err(struct eip93_device *mtk, int err)
-+{
-+      u32 ext_err;
-+
-+      if (!err)
-+              return 0;
-+
-+      switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
-+      case EIP93_PE_CTRL_PE_AUTH_ERR:
-+      case EIP93_PE_CTRL_PE_PAD_ERR:
-+              return -EBADMSG;
-+      /* let software handle anti-replay errors */
-+      case EIP93_PE_CTRL_PE_SEQNUM_ERR:
-+              return 0;
-+      case EIP93_PE_CTRL_PE_EXT_ERR:
-+              break;
-+      default:
-+              dev_err(mtk->dev, "Unhandled error 0x%08x\n", err);
-+              return -EINVAL;
-+      }
-+
-+      /* Parse additional ext errors */
-+      ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
-+      switch (ext_err) {
-+      case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
-+      case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
-+              return -EIO;
-+      case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
-+              return -EACCES;
-+      case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
-+      case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
-+      case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
-+              return -EINVAL;
-+      case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
-+      case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
-+      case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
-+              return -EBADMSG;
-+      default:
-+              dev_err(mtk->dev, "Unhandled ext error 0x%08x\n", ext_err);
-+              return -EINVAL;
-+      }
-+}
-+
-+static void *eip93_ring_next_wptr(struct eip93_device *mtk,
-+                                struct eip93_desc_ring *ring)
-+{
-+      void *ptr = ring->write;
-+
-+      if ((ring->write == ring->read - ring->offset) ||
-+          (ring->read == ring->base && ring->write == ring->base_end))
-+              return ERR_PTR(-ENOMEM);
-+
-+      if (ring->write == ring->base_end)
-+              ring->write = ring->base;
-+      else
-+              ring->write += ring->offset;
-+
-+      return ptr;
-+}
-+
-+static void *eip93_ring_next_rptr(struct eip93_device *mtk,
-+                                struct eip93_desc_ring *ring)
-+{
-+      void *ptr = ring->read;
-+
-+      if (ring->write == ring->read)
-+              return ERR_PTR(-ENOENT);
-+
-+      if (ring->read == ring->base_end)
-+              ring->read = ring->base;
-+      else
-+              ring->read += ring->offset;
-+
-+      return ptr;
-+}
-+
-+int eip93_put_descriptor(struct eip93_device *mtk,
-+                       struct eip93_descriptor *desc)
-+{
-+      struct eip93_descriptor *cdesc;
-+      struct eip93_descriptor *rdesc;
-+
-+      guard(spinlock_irqsave)(&mtk->ring->write_lock);
-+
-+      rdesc = eip93_ring_next_wptr(mtk, &mtk->ring->rdr);
-+
-+      if (IS_ERR(rdesc))
-+              return -ENOENT;
-+
-+      cdesc = eip93_ring_next_wptr(mtk, &mtk->ring->cdr);
-+      if (IS_ERR(cdesc))
-+              return -ENOENT;
-+
-+      memset(rdesc, 0, sizeof(struct eip93_descriptor));
-+
-+      memcpy(cdesc, desc, sizeof(struct eip93_descriptor));
-+
-+      atomic_dec(&mtk->ring->free);
-+
-+      return 0;
-+}
-+
-+void *eip93_get_descriptor(struct eip93_device *mtk)
-+{
-+      struct eip93_descriptor *cdesc;
-+      void *ptr;
-+
-+      guard(spinlock_irqsave)(&mtk->ring->read_lock);
-+
-+      cdesc = eip93_ring_next_rptr(mtk, &mtk->ring->cdr);
-+      if (IS_ERR(cdesc))
-+              return ERR_PTR(-ENOENT);
-+
-+      memset(cdesc, 0, sizeof(struct eip93_descriptor));
-+
-+      ptr = eip93_ring_next_rptr(mtk, &mtk->ring->rdr);
-+      if (IS_ERR(ptr))
-+              return ERR_PTR(-ENOENT);
-+
-+      atomic_inc(&mtk->ring->free);
-+
-+      return ptr;
-+}
-+
-+static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
-+{
-+      if (!*sg || !len)
-+              return;
-+
-+      free_pages((unsigned long)sg_virt(*sg), get_order(len));
-+      kfree(*sg);
-+      *sg = NULL;
-+}
-+
-+static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
-+                            const u32 len, const bool copy)
-+{
-+      void *pages;
-+
-+      *dst = kmalloc(sizeof(**dst), GFP_KERNEL);
-+      if (!*dst)
-+              return -ENOMEM;
-+
-+      pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
-+                                       get_order(len));
-+      if (!pages) {
-+              kfree(*dst);
-+              *dst = NULL;
-+              return -ENOMEM;
-+      }
-+
-+      sg_init_table(*dst, 1);
-+      sg_set_buf(*dst, pages, len);
-+
-+      /* copy only as requested */
-+      if (copy)
-+              sg_copy_to_buffer(src, sg_nents(src), pages, len);
-+
-+      return 0;
-+}
-+
-+static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
-+                              const int blksize)
-+{
-+      int nents;
-+
-+      for (nents = 0; sg; sg = sg_next(sg), ++nents) {
-+              if (!IS_ALIGNED(sg->offset, 4))
-+                      return false;
-+
-+              if (len <= sg->length) {
-+                      if (!IS_ALIGNED(len, blksize))
-+                              return false;
-+
-+                      return true;
-+              }
-+
-+              if (!IS_ALIGNED(sg->length, blksize))
-+                      return false;
-+
-+              len -= sg->length;
-+      }
-+      return false;
-+}
-+
-+int check_valid_request(struct eip93_cipher_reqctx *rctx)
-+{
-+      struct scatterlist *src = rctx->sg_src;
-+      struct scatterlist *dst = rctx->sg_dst;
-+      u32 src_nents, dst_nents;
-+      u32 textsize = rctx->textsize;
-+      u32 authsize = rctx->authsize;
-+      u32 blksize = rctx->blksize;
-+      u32 totlen_src = rctx->assoclen + rctx->textsize;
-+      u32 totlen_dst = rctx->assoclen + rctx->textsize;
-+      u32 copy_len;
-+      bool src_align, dst_align;
-+      int err = -EINVAL;
-+
-+      if (!IS_CTR(rctx->flags)) {
-+              if (!IS_ALIGNED(textsize, blksize))
-+                      return err;
-+      }
-+
-+      if (authsize) {
-+              if (IS_ENCRYPT(rctx->flags))
-+                      totlen_dst += authsize;
-+              else
-+                      totlen_src += authsize;
-+      }
-+
-+      src_nents = sg_nents_for_len(src, totlen_src);
-+      dst_nents = sg_nents_for_len(dst, totlen_dst);
-+
-+      if (src == dst) {
-+              src_nents = max(src_nents, dst_nents);
-+              dst_nents = src_nents;
-+              if (unlikely((totlen_src || totlen_dst) && src_nents <= 0))
-+                      return err;
-+
-+      } else {
-+              if (unlikely(totlen_src && src_nents <= 0))
-+                      return err;
-+
-+              if (unlikely(totlen_dst && dst_nents <= 0))
-+                      return err;
-+      }
-+
-+      if (authsize) {
-+              if (dst_nents == 1 && src_nents == 1) {
-+                      src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
-+                      if (src == dst)
-+                              dst_align = src_align;
-+                      else
-+                              dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
-+              } else {
-+                      src_align = false;
-+                      dst_align = false;
-+              }
-+      } else {
-+              src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
-+              if (src == dst)
-+                      dst_align = src_align;
-+              else
-+                      dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
-+      }
-+
-+      copy_len = max(totlen_src, totlen_dst);
-+      if (!src_align) {
-+              err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
-+              if (err)
-+                      return err;
-+      }
-+
-+      if (!dst_align) {
-+              err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
-+              if (err)
-+                      return err;
-+      }
-+
-+      rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
-+      rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
-+
-+      return 0;
-+}
-+
-+/*
-+ * Set sa_record function:
-+ * Even if sa_record is already zeroed, keep the explicit " = 0" for readability.
-+ */
-+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
-+                       const u32 flags)
-+{
-+      /* Reset cmd word */
-+      sa_record->sa_cmd0_word = 0;
-+      sa_record->sa_cmd1_word = 0;
-+
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
-+      if (!IS_ECB(flags))
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;
-+
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;
-+
-+      switch ((flags & EIP93_ALG_MASK)) {
-+      case EIP93_ALG_AES:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
-+              sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
-+                                                    keylen >> 3);
-+              break;
-+      case EIP93_ALG_3DES:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
-+              break;
-+      case EIP93_ALG_DES:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
-+              break;
-+      default:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
-+      }
-+
-+      switch ((flags & EIP93_HASH_MASK)) {
-+      case EIP93_HASH_SHA256:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
-+              break;
-+      case EIP93_HASH_SHA224:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
-+              break;
-+      case EIP93_HASH_SHA1:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
-+              break;
-+      case EIP93_HASH_MD5:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
-+              break;
-+      default:
-+              sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
-+      }
-+
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;
-+
-+      switch ((flags & EIP93_MODE_MASK)) {
-+      case EIP93_MODE_CBC:
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
-+              break;
-+      case EIP93_MODE_CTR:
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
-+              break;
-+      case EIP93_MODE_ECB:
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
-+              break;
-+      }
-+
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
-+      if (IS_HASH(flags)) {
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
-+      }
-+
-+      if (IS_HMAC(flags)) {
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
-+              sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
-+      }
-+
-+      sa_record->sa_spi = 0x0;
-+      sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
-+      sa_record->sa_seqmum_mask[1] = 0x0;
-+}
-+
-+/*
-+ * Poor man's scatter/gather function:
-+ * Create a descriptor for every segment to avoid copying buffers.
-+ * For performance it is better to let the hardware perform multiple DMAs.
-+ */
-+static int eip93_scatter_combine(struct eip93_device *mtk,
-+                               struct eip93_cipher_reqctx *rctx,
-+                               u32 datalen, u32 split, int offsetin)
-+{
-+      struct eip93_descriptor *cdesc = rctx->cdesc;
-+      struct scatterlist *sgsrc = rctx->sg_src;
-+      struct scatterlist *sgdst = rctx->sg_dst;
-+      unsigned int remainin = sg_dma_len(sgsrc);
-+      unsigned int remainout = sg_dma_len(sgdst);
-+      dma_addr_t saddr = sg_dma_address(sgsrc);
-+      dma_addr_t daddr = sg_dma_address(sgdst);
-+      dma_addr_t state_addr;
-+      u32 src_addr, dst_addr, len, n;
-+      bool nextin = false;
-+      bool nextout = false;
-+      int offsetout = 0;
-+      int ndesc_cdr = 0, err;
-+
-+      if (IS_ECB(rctx->flags))
-+              rctx->sa_state_base = 0;
-+
-+      if (split < datalen) {
-+              state_addr = rctx->sa_state_ctr_base;
-+              n = split;
-+      } else {
-+              state_addr = rctx->sa_state_base;
-+              n = datalen;
-+      }
-+
-+      do {
-+              if (nextin) {
-+                      sgsrc = sg_next(sgsrc);
-+                      remainin = sg_dma_len(sgsrc);
-+                      if (remainin == 0)
-+                              continue;
-+
-+                      saddr = sg_dma_address(sgsrc);
-+                      offsetin = 0;
-+                      nextin = false;
-+              }
-+
-+              if (nextout) {
-+                      sgdst = sg_next(sgdst);
-+                      remainout = sg_dma_len(sgdst);
-+                      if (remainout == 0)
-+                              continue;
-+
-+                      daddr = sg_dma_address(sgdst);
-+                      offsetout = 0;
-+                      nextout = false;
-+              }
-+              src_addr = saddr + offsetin;
-+              dst_addr = daddr + offsetout;
-+
-+              if (remainin == remainout) {
-+                      len = remainin;
-+                      if (len > n) {
-+                              len = n;
-+                              remainin -= n;
-+                              remainout -= n;
-+                              offsetin += n;
-+                              offsetout += n;
-+                      } else {
-+                              nextin = true;
-+                              nextout = true;
-+                      }
-+              } else if (remainin < remainout) {
-+                      len = remainin;
-+                      if (len > n) {
-+                              len = n;
-+                              remainin -= n;
-+                              remainout -= n;
-+                              offsetin += n;
-+                              offsetout += n;
-+                      } else {
-+                              offsetout += len;
-+                              remainout -= len;
-+                              nextin = true;
-+                      }
-+              } else {
-+                      len = remainout;
-+                      if (len > n) {
-+                              len = n;
-+                              remainin -= n;
-+                              remainout -= n;
-+                              offsetin += n;
-+                              offsetout += n;
-+                      } else {
-+                              offsetin += len;
-+                              remainin -= len;
-+                              nextout = true;
-+                      }
-+              }
-+              n -= len;
-+
-+              cdesc->src_addr = src_addr;
-+              cdesc->dst_addr = dst_addr;
-+              cdesc->state_addr = state_addr;
-+              cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
-+                                                 EIP93_PE_LENGTH_HOST_READY);
-+              cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);
-+
-+              if (n == 0) {
-+                      n = datalen - split;
-+                      split = datalen;
-+                      state_addr = rctx->sa_state_base;
-+              }
-+
-+              if (n == 0)
-+                      cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
-+                                                   EIP93_DESC_LAST);
-+
-+              /*
-+               * Loop - Delay - No need to rollback
-+               * Maybe refine by slowing down at EIP93_RING_BUSY
-+               */
-+again:
-+              err = eip93_put_descriptor(mtk, cdesc);
-+              if (err) {
-+                      usleep_range(EIP93_RING_BUSY_DELAY,
-+                                   EIP93_RING_BUSY_DELAY * 2);
-+                      goto again;
-+              }
-+              /* Writing new descriptor count starts DMA action */
-+              writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
-+
-+              ndesc_cdr++;
-+      } while (n);
-+
-+      return -EINPROGRESS;
-+}
-+
-+int eip93_send_req(struct crypto_async_request *async,
-+                 const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
-+{
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
-+      struct eip93_device *mtk = ctx->mtk;
-+      struct scatterlist *src = rctx->sg_src;
-+      struct scatterlist *dst = rctx->sg_dst;
-+      struct sa_state *sa_state;
-+      struct eip93_descriptor cdesc;
-+      u32 flags = rctx->flags;
-+      int offsetin = 0, err;
-+      u32 datalen = rctx->assoclen + rctx->textsize;
-+      u32 split = datalen;
-+      u32 start, end, ctr, blocks;
-+      u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
-+      int crypto_async_idr;
-+
-+      rctx->sa_state_ctr = NULL;
-+      rctx->sa_state = NULL;
-+
-+      if (IS_ECB(flags))
-+              goto skip_iv;
-+
-+      memcpy(iv, reqiv, rctx->ivsize);
-+
-+      rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
-+      if (!rctx->sa_state)
-+              return -ENOMEM;
-+
-+      sa_state = rctx->sa_state;
-+
-+      memcpy(sa_state->state_iv, iv, rctx->ivsize);
-+      if (IS_RFC3686(flags)) {
-+              sa_state->state_iv[0] = ctx->sa_nonce;
-+              sa_state->state_iv[1] = iv[0];
-+              sa_state->state_iv[2] = iv[1];
-+              sa_state->state_iv[3] = cpu_to_be32(1);
-+      } else if (!IS_HMAC(flags) && IS_CTR(flags)) {
-+              /* Compute data length. */
-+              blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
-+              ctr = be32_to_cpu(iv[3]);
-+              /* Check 32bit counter overflow. */
-+              start = ctr;
-+              end = start + blocks - 1;
-+              if (end < start) {
-+                      split = AES_BLOCK_SIZE * -start;
-+                      /*
-+                       * Increment the counter manually to cope with
-+                       * the hardware counter overflow.
-+                       */
-+                      iv[3] = 0xffffffff;
-+                      crypto_inc((u8 *)iv, AES_BLOCK_SIZE);
-+
-+                      rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
-+                                                   GFP_KERNEL);
-+                      if (!rctx->sa_state_ctr)
-+                              goto free_sa_state;
-+
-+                      memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
-+                      memcpy(sa_state->state_iv, iv, rctx->ivsize);
-+
-+                      rctx->sa_state_ctr_base = dma_map_single(mtk->dev, rctx->sa_state_ctr,
-+                                                               sizeof(*rctx->sa_state_ctr),
-+                                                               DMA_TO_DEVICE);
-+                      err = dma_mapping_error(mtk->dev, rctx->sa_state_ctr_base);
-+                      if (err)
-+                              goto free_sa_state_ctr;
-+              }
-+      }
-+
-+      rctx->sa_state_base = dma_map_single(mtk->dev, rctx->sa_state,
-+                                           sizeof(*rctx->sa_state), DMA_TO_DEVICE);
-+      err = dma_mapping_error(mtk->dev, rctx->sa_state_base);
-+      if (err)
-+              goto free_sa_state_ctr_dma;
-+
-+skip_iv:
-+
-+      cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
-+                                           EIP93_PE_CTRL_HOST_READY);
-+      cdesc.sa_addr = rctx->sa_record_base;
-+      cdesc.arc4_addr = 0;
-+
-+      scoped_guard(spinlock_bh, &mtk->ring->idr_lock)
-+              crypto_async_idr = idr_alloc(&mtk->ring->crypto_async_idr, async, 0,
-+                                           EIP93_RING_NUM - 1, GFP_ATOMIC);
-+
-+      cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
-+                      FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);
-+
-+      rctx->cdesc = &cdesc;
-+
-+      /* Map DMA_BIDIRECTIONAL to invalidate the cache on the destination;
-+       * this implies __dma_cache_wback_inv.
-+       */
-+      if (!dma_map_sg(mtk->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
-+              err = -ENOMEM;
-+              goto free_sa_state_ctr_dma;
-+      }
-+
-+      if (src != dst &&
-+          !dma_map_sg(mtk->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
-+              err = -ENOMEM;
-+              goto free_sg_dma;
-+      }
-+
-+      return eip93_scatter_combine(mtk, rctx, datalen, split, offsetin);
-+
-+free_sg_dma:
-+      dma_unmap_sg(mtk->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
-+free_sa_state_ctr_dma:
-+      if (rctx->sa_state_ctr)
-+              dma_unmap_single(mtk->dev, rctx->sa_state_ctr_base,
-+                               sizeof(*rctx->sa_state_ctr),
-+                               DMA_TO_DEVICE);
-+free_sa_state_ctr:
-+      kfree(rctx->sa_state_ctr);
-+      if (rctx->sa_state)
-+              dma_unmap_single(mtk->dev, rctx->sa_state_base,
-+                               sizeof(*rctx->sa_state),
-+                               DMA_TO_DEVICE);
-+free_sa_state:
-+      kfree(rctx->sa_state);
-+
-+      return err;
-+}
-+
-+void eip93_unmap_dma(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
-+                   struct scatterlist *reqsrc, struct scatterlist *reqdst)
-+{
-+      u32 len = rctx->assoclen + rctx->textsize;
-+      u32 authsize = rctx->authsize;
-+      u32 flags = rctx->flags;
-+      u32 *otag;
-+      int i;
-+
-+      if (rctx->sg_src == rctx->sg_dst) {
-+              dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
-+                           DMA_BIDIRECTIONAL);
-+              goto process_tag;
-+      }
-+
-+      dma_unmap_sg(mtk->dev, rctx->sg_src, rctx->src_nents,
-+                   DMA_TO_DEVICE);
-+
-+      if (rctx->sg_src != reqsrc)
-+              eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);
-+
-+      dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
-+                   DMA_BIDIRECTIONAL);
-+
-+      /* SHA tags need conversion from net-to-host */
-+process_tag:
-+      if (IS_DECRYPT(flags))
-+              authsize = 0;
-+
-+      if (authsize) {
-+              if (!IS_HASH_MD5(flags)) {
-+                      otag = sg_virt(rctx->sg_dst) + len;
-+                      for (i = 0; i < (authsize / 4); i++)
-+                              otag[i] = be32_to_cpu(otag[i]);
-+              }
-+      }
-+
-+      if (rctx->sg_dst != reqdst) {
-+              sg_copy_from_buffer(reqdst, sg_nents(reqdst),
-+                                  sg_virt(rctx->sg_dst), len + authsize);
-+              eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
-+      }
-+}
-+
-+void eip93_handle_result(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
-+                       u8 *reqiv)
-+{
-+      if (rctx->sa_state_ctr)
-+              dma_unmap_single(mtk->dev, rctx->sa_state_ctr_base,
-+                               sizeof(*rctx->sa_state_ctr),
-+                               DMA_FROM_DEVICE);
-+
-+      if (rctx->sa_state)
-+              dma_unmap_single(mtk->dev, rctx->sa_state_base,
-+                               sizeof(*rctx->sa_state),
-+                               DMA_FROM_DEVICE);
-+
-+      if (!IS_ECB(rctx->flags))
-+              memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);
-+
-+      kfree(rctx->sa_state_ctr);
-+      kfree(rctx->sa_state);
-+}
-+
-+/* Basically this is the HMAC setkey: derive the inner and outer pad digests */
-+int eip93_authenc_setkey(struct crypto_aead *aead, struct sa_record *sa,
-+                       const u8 *authkey, unsigned int authkeylen)
-+{
-+      struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-+      struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct crypto_ahash *ahash_tfm;
-+      struct eip93_hash_reqctx *rctx;
-+      struct scatterlist sg[1];
-+      struct ahash_request *req;
-+      DECLARE_CRYPTO_WAIT(wait);
-+      const char *alg_name;
-+      u8 *ipad, *opad;
-+      int i, ret;
-+
-+      switch ((ctx->flags & EIP93_HASH_MASK)) {
-+      case EIP93_HASH_SHA256:
-+              alg_name = "sha256-eip93";
-+              break;
-+      case EIP93_HASH_SHA224:
-+              alg_name = "sha224-eip93";
-+              break;
-+      case EIP93_HASH_SHA1:
-+              alg_name = "sha1-eip93";
-+              break;
-+      case EIP93_HASH_MD5:
-+              alg_name = "md5-eip93";
-+              break;
-+      default: /* Impossible */
-+              return -EINVAL;
-+      }
-+
-+      ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
-+      if (IS_ERR(ahash_tfm))
-+              return PTR_ERR(ahash_tfm);
-+
-+      req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
-+      if (!req) {
-+              ret = -ENOMEM;
-+              goto err_ahash;
-+      }
-+
-+      ipad = kcalloc(2, SHA256_BLOCK_SIZE, GFP_KERNEL);
-+      if (!ipad) {
-+              ret = -ENOMEM;
-+              goto err_req;
-+      }
-+      opad = ipad + SHA256_BLOCK_SIZE;
-+
-+      rctx = ahash_request_ctx(req);
-+      crypto_init_wait(&wait);
-+      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-+                                 crypto_req_done, &wait);
-+
-+      /* Hash the key if > SHA256_BLOCK_SIZE */
-+      if (authkeylen > SHA256_BLOCK_SIZE) {
-+              sg_init_one(&sg[0], authkey, authkeylen);
-+
-+              ahash_request_set_crypt(req, sg, ipad, authkeylen);
-+              ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
-+
-+              authkeylen = ctx->authsize;
-+      } else {
-+              memcpy(ipad, authkey, authkeylen);
-+      }
-+
-+      /* Copy to opad */
-+      memset(ipad + authkeylen, 0, SHA256_BLOCK_SIZE - authkeylen);
-+      memcpy(opad, ipad, SHA256_BLOCK_SIZE);
-+
-+      /* Pad with HMAC constants */
-+      for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
-+              ipad[i] ^= HMAC_IPAD_VALUE;
-+              opad[i] ^= HMAC_OPAD_VALUE;
-+      }
-+
-+      /* Disable HASH_FINALIZE for ipad and opad hash */
-+      rctx->no_finalize = true;
-+
-+      /* Hash ipad */
-+      sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
-+      ahash_request_set_crypt(req, sg, sa->sa_i_digest, SHA256_BLOCK_SIZE);
-+      ret = crypto_ahash_init(req);
-+      if (ret)
-+              goto exit;
-+
-+      /* Disable HASH_FINALIZE for ipad hash */
-+      rctx->no_finalize = true;
-+
-+      ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
-+      if (ret)
-+              goto exit;
-+
-+      /* Hash opad */
-+      sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
-+      ahash_request_set_crypt(req, sg, sa->sa_o_digest, SHA256_BLOCK_SIZE);
-+      ret = crypto_ahash_init(req);
-+      if (ret)
-+              goto exit;
-+
-+      /* Disable HASH_FINALIZE for opad hash */
-+      rctx->no_finalize = true;
-+
-+      ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
-+      if (ret)
-+              goto exit;
-+
-+      if (!IS_HASH_MD5(ctx->flags)) {
-+              for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
-+                      u32 *ipad_hash = (u32 *)sa->sa_i_digest;
-+                      u32 *opad_hash = (u32 *)sa->sa_o_digest;
-+
-+                      ipad_hash[i] = cpu_to_be32(ipad_hash[i]);
-+                      opad_hash[i] = cpu_to_be32(opad_hash[i]);
-+              }
-+      }
-+
-+exit:
-+      kfree(ipad);
-+err_req:
-+      ahash_request_free(req);
-+err_ahash:
-+      crypto_free_ahash(ahash_tfm);
-+
-+      return ret;
-+}
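eip93_authenc_setkey() above follows the usual RFC 2104 HMAC key preparation: a key longer than the hash block is first digested, the result is zero-padded, and two copies are XORed with the ipad/opad constants before the hardware hashes them into the inner and outer digests. A condensed sketch of that padding step, not taken from the patch and with placeholder buffer handling, is:

#include <crypto/hmac.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: build the two HMAC pad blocks from a key that
 * already fits in one hash block (longer keys are digested first).
 */
static void example_hmac_prepare_pads(const u8 *key, unsigned int keylen,
				      unsigned int block_size,
				      u8 *ipad, u8 *opad)
{
	unsigned int i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, block_size - keylen);
	memcpy(opad, ipad, block_size);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
}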
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-common.h
-@@ -0,0 +1,25 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#ifndef _EIP93_COMMON_H_
-+#define _EIP93_COMMON_H_
-+
-+#include "eip93-main.h"
-+
-+void *eip93_get_descriptor(struct eip93_device *mtk);
-+int eip93_put_descriptor(struct eip93_device *mtk, struct eip93_descriptor *desc);
-+
-+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
-+                       const u32 flags);
-+
-+int eip93_parse_ctrl_stat_err(struct eip93_device *mtk, int err);
-+
-+int eip93_authenc_setkey(struct crypto_aead *aead, struct sa_record *sa,
-+                       const u8 *authkey, unsigned int authkeylen);
-+
-+#endif /* _EIP93_COMMON_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-des.h
-@@ -0,0 +1,16 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_DES_H_
-+#define _EIP93_DES_H_
-+
-+extern struct eip93_alg_template eip93_alg_ecb_des;
-+extern struct eip93_alg_template eip93_alg_cbc_des;
-+extern struct eip93_alg_template eip93_alg_ecb_des3_ede;
-+extern struct eip93_alg_template eip93_alg_cbc_des3_ede;
-+
-+#endif /* _EIP93_DES_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
-@@ -0,0 +1,909 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2024
-+ *
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#include <crypto/sha1.h>
-+#include <crypto/sha2.h>
-+#include <crypto/md5.h>
-+#include <crypto/hmac.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/delay.h>
-+
-+#include "eip93-cipher.h"
-+#include "eip93-hash.h"
-+#include "eip93-main.h"
-+#include "eip93-common.h"
-+#include "eip93-regs.h"
-+
-+static void eip93_hash_free_data_blocks(struct ahash_request *req)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct mkt_hash_block *block;
-+
-+      list_for_each_entry(block, &rctx->blocks, list) {
-+              dma_unmap_single(rctx->mtk->dev, block->data_dma,
-+                               SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
-+              kfree(block);
-+      }
-+}
-+
-+static void eip93_hash_free_sa_record(struct ahash_request *req)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+
-+      if (IS_HMAC(ctx->flags)) {
-+              dma_unmap_single(rctx->mtk->dev, rctx->sa_record_hmac_base,
-+                               sizeof(*rctx->sa_record_hmac), DMA_TO_DEVICE);
-+              kfree(rctx->sa_record_hmac);
-+      }
-+
-+      dma_unmap_single(rctx->mtk->dev, rctx->sa_record_base,
-+                       sizeof(*rctx->sa_record), DMA_TO_DEVICE);
-+      kfree(rctx->sa_record);
-+}
-+
-+static void eip93_hash_free_sa_state(struct ahash_request *req)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+
-+      dma_unmap_single(rctx->mtk->dev, rctx->sa_state_base,
-+                       sizeof(*rctx->sa_state), DMA_TO_DEVICE);
-+      kfree(rctx->sa_state);
-+}
-+
-+static struct sa_state *eip93_hash_get_sa_state(struct ahash_request *req,
-+                                              dma_addr_t *sa_state_base)
-+{
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      struct eip93_device *mtk = ctx->mtk;
-+      struct sa_state *sa_state;
-+      int ret;
-+
-+      sa_state = kzalloc(sizeof(*sa_state), GFP_KERNEL);
-+      if (!sa_state)
-+              return ERR_PTR(-ENOMEM);
-+
-+      /* Init HASH constant */
-+      switch ((ctx->flags & EIP93_HASH_MASK)) {
-+      case EIP93_HASH_SHA256:
-+              u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
-+                              SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
-+
-+              memcpy(sa_state->state_i_digest, sha256_init, sizeof(sha256_init));
-+              break;
-+      case EIP93_HASH_SHA224:
-+              u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
-+                              SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
-+
-+              memcpy(sa_state->state_i_digest, sha224_init, sizeof(sha224_init));
-+              break;
-+      case EIP93_HASH_SHA1:
-+              u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
-+
-+              memcpy(sa_state->state_i_digest, sha1_init, sizeof(sha1_init));
-+              break;
-+      case EIP93_HASH_MD5:
-+              u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
-+
-+              memcpy(sa_state->state_i_digest, md5_init, sizeof(md5_init));
-+              break;
-+      default: /* Impossible */
-+              kfree(sa_state);
-+              return ERR_PTR(-EINVAL);
-+      }
-+
-+      *sa_state_base = dma_map_single(mtk->dev, sa_state,
-+                                      sizeof(*sa_state), DMA_TO_DEVICE);
-+      ret = dma_mapping_error(mtk->dev, *sa_state_base);
-+      if (ret) {
-+              kfree(sa_state);
-+              return ERR_PTR(ret);
-+      }
-+
-+      return sa_state;
-+}
-+
-+static int _eip93_hash_init(struct ahash_request *req, struct sa_state *sa_state,
-+                          dma_addr_t sa_state_base)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      struct sa_record *sa_record, *sa_record_hmac;
-+      struct eip93_device *mtk = rctx->mtk;
-+      int digestsize;
-+      int ret;
-+
-+      sa_record = kzalloc(sizeof(*sa_record), GFP_KERNEL);
-+      if (!sa_record)
-+              return -ENOMEM;
-+
-+      if (IS_HMAC(ctx->flags)) {
-+              sa_record_hmac = kzalloc(sizeof(*sa_record_hmac), GFP_KERNEL);
-+              if (!sa_record_hmac) {
-+                      ret = -ENOMEM;
-+                      goto free_sa_record;
-+              }
-+      }
-+
-+      digestsize = crypto_ahash_digestsize(ahash);
-+
-+      eip93_set_sa_record(sa_record, 0, ctx->flags);
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
-+      sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
-+      sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
-+      sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
-+                                            EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
-+      sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
-+      sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
-+                                            digestsize / sizeof(u32));
-+
-+      /*
-+       * HMAC special handling
-+       * Enabling CMD_HMAC forces the inner hash to always be finalized.
-+       * This causes problems when handling messages larger than 64 bytes,
-+       * as we need to produce intermediate inner hashes while sending the
-+       * intermediate 64-byte blocks.
-+       *
-+       * To handle this, enable CMD_HMAC only on the last block.
-+       * We make a duplicate of sa_record and on the last descriptor,
-+       * we pass a dedicated sa_record with CMD_HMAC enabled to make
-+       * EIP93 apply the outer hash.
-+       */
-+      if (IS_HMAC(ctx->flags)) {
-+              memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
-+              /* Copy pre-hashed opad for HMAC */
-+              memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);
-+
-+              /* Disable HMAC for hash normal sa_record */
-+              sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
-+      }
-+
-+      rctx->mtk = ctx->mtk;
-+      rctx->sa_record = sa_record;
-+      rctx->sa_record_base = dma_map_single(mtk->dev, rctx->sa_record,
-+                                            sizeof(*rctx->sa_record),
-+                                            DMA_TO_DEVICE);
-+      ret = dma_mapping_error(mtk->dev, rctx->sa_record_base);
-+      if (ret)
-+              goto free_sa_record;
-+
-+      if (IS_HMAC(ctx->flags)) {
-+              rctx->sa_record_hmac = sa_record_hmac;
-+              rctx->sa_record_hmac_base = dma_map_single(mtk->dev,
-+                                                         rctx->sa_record_hmac,
-+                                                         sizeof(*rctx->sa_record_hmac),
-+                                                         DMA_TO_DEVICE);
-+              ret = dma_mapping_error(mtk->dev, rctx->sa_record_hmac_base);
-+              if (ret)
-+                      goto free_sa_record_base;
-+      }
-+
-+      rctx->sa_state = sa_state;
-+      rctx->sa_state_base = sa_state_base;
-+
-+      rctx->len = 0;
-+      rctx->left_last = 0;
-+      rctx->no_finalize = false;
-+      INIT_LIST_HEAD(&rctx->blocks);
-+
-+      return 0;
-+
-+free_sa_record_base:
-+      dma_unmap_single(mtk->dev, rctx->sa_record_base, sizeof(*rctx->sa_record),
-+                       DMA_TO_DEVICE);
-+free_sa_record:
-+      kfree(sa_record);
-+      return ret;
-+}
-+
-+static int eip93_hash_init(struct ahash_request *req)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      struct sa_state *sa_state;
-+      dma_addr_t sa_state_base;
-+      int ret;
-+
-+      sa_state = eip93_hash_get_sa_state(req, &sa_state_base);
-+      if (IS_ERR(sa_state))
-+              return PTR_ERR(sa_state);
-+
-+      ret = _eip93_hash_init(req, sa_state, sa_state_base);
-+      if (ret) {
-+              eip93_hash_free_sa_state(req);
-+              return ret;
-+      }
-+
-+      /* For HMAC setup the initial block for ipad */
-+      if (IS_HMAC(ctx->flags)) {
-+              struct mkt_hash_block *block;
-+
-+              block = kzalloc(sizeof(*block), GFP_KERNEL);
-+              if (!block) {
-+                      eip93_hash_free_sa_record(req);
-+                      eip93_hash_free_sa_state(req);
-+                      return -ENOMEM;
-+              }
-+
-+              memcpy(block->data, ctx->ipad, SHA256_BLOCK_SIZE);
-+
-+              list_add(&block->list, &rctx->blocks);
-+
-+              rctx->len += SHA256_BLOCK_SIZE;
-+      }
-+
-+      return ret;
-+}
-+
-+static void eip93_send_hash_req(struct crypto_async_request *async, dma_addr_t src_addr,
-+                              u32 len, bool last)
-+{
-+      struct ahash_request *req = ahash_request_cast(async);
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      struct eip93_device *mtk = rctx->mtk;
-+      struct eip93_descriptor cdesc = { };
-+      int ret;
-+
-+      cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
-+                                           EIP93_PE_CTRL_HOST_READY);
-+      cdesc.sa_addr = rctx->sa_record_base;
-+      cdesc.arc4_addr = 0;
-+
-+      cdesc.state_addr = rctx->sa_state_base;
-+      cdesc.src_addr = src_addr;
-+      cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
-+                                        EIP93_PE_LENGTH_HOST_READY);
-+      cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
-+                                         len);
-+
-+      cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);
-+
-+      if (last) {
-+              int crypto_async_idr;
-+
-+              /* For last block, pass sa_record with CMD_HMAC enabled */
-+              if (IS_HMAC(ctx->flags))
-+                      cdesc.sa_addr = rctx->sa_record_hmac_base;
-+
-+              if (!rctx->no_finalize)
-+                      cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
-+
-+              scoped_guard(spinlock_bh, &mtk->ring->idr_lock)
-+                      crypto_async_idr = idr_alloc(&mtk->ring->crypto_async_idr, async, 0,
-+                                                   EIP93_RING_NUM - 1, GFP_ATOMIC);
-+
-+              cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
-+                               FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
-+      }
-+
-+again:
-+      ret = eip93_put_descriptor(mtk, &cdesc);
-+      if (ret) {
-+              usleep_range(EIP93_RING_BUSY_DELAY,
-+                           EIP93_RING_BUSY_DELAY * 2);
-+              goto again;
-+      }
-+
-+      /* Writing new descriptor count starts DMA action */
-+      writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
-+}
-+
-+static int eip93_hash_update(struct ahash_request *req)
-+{
-+      struct crypto_async_request *async = &req->base;
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      unsigned int to_consume = req->nbytes;
-+      struct eip93_device *mtk = rctx->mtk;
-+      struct mkt_hash_block *block;
-+      int read = 0;
-+      int ret;
-+
-+      /* If the request is 0 length, do nothing */
-+      if (!to_consume)
-+              return 0;
-+
-+      /*
-+       * Check if this is a subsequent iteration of update().
-+       * 1. Try to fill the first block up to 64 bytes (if not already full)
-+       * 2. Send the full block (if we have more data to consume)
-+       */
-+      if (rctx->len > 0) {
-+              int offset = SHA256_BLOCK_SIZE - rctx->left_last;
-+
-+              block = list_first_entry(&rctx->blocks,
-+                                       struct mkt_hash_block, list);
-+
-+              /* Fill the first block */
-+              if (rctx->left_last) {
-+                      read += sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-+                                                 block->data + offset,
-+                                                 min(to_consume, rctx->left_last),
-+                                                 0);
-+                      to_consume -= read;
-+                      rctx->left_last -= read;
-+              }
-+
-+              /* Send descriptor if we have more data to consume */
-+              if (to_consume > 0) {
-+                      block->data_dma = dma_map_single(mtk->dev, block->data,
-+                                                       SHA256_BLOCK_SIZE,
-+                                                       DMA_TO_DEVICE);
-+                      ret = dma_mapping_error(mtk->dev, block->data_dma);
-+                      if (ret)
-+                              return ret;
-+
-+                      eip93_send_hash_req(async, block->data_dma,
-+                                          SHA256_BLOCK_SIZE, false);
-+              }
-+      }
-+
-+      /*
-+       * Consume remaining data.
-+       * 1. Loop until we have consumed all the data in blocks of 64 bytes
-+       * 2. Send each full 64-byte block
-+       * 3. Skip sending the last block so a future update() or final() can
-+       *    enable the HASH_FINALIZE bit.
-+       */
-+      while (to_consume > 0) {
-+              int to_read = min(to_consume, SHA256_BLOCK_SIZE);
-+
-+              block = kzalloc(sizeof(*block), GFP_KERNEL);
-+              if (!block)
-+                      return -ENOMEM;
-+
-+              read += sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-+                                         block->data, to_read,
-+                                         read);
-+
-+              list_add(&block->list, &rctx->blocks);
-+
-+              to_consume -= to_read;
-+              rctx->left_last = SHA256_BLOCK_SIZE - to_read;
-+
-+              /* Send descriptor if we have more data to consume */
-+              if (to_consume > 0) {
-+                      block->data_dma = dma_map_single(mtk->dev, block->data,
-+                                                       SHA256_BLOCK_SIZE,
-+                                                       DMA_TO_DEVICE);
-+                      ret = dma_mapping_error(mtk->dev, block->data_dma);
-+                      if (ret)
-+                              return ret;
-+
-+                      eip93_send_hash_req(async, block->data_dma,
-+                                          SHA256_BLOCK_SIZE, false);
-+              }
-+      }
-+
-+      /*
-+       * Update counter with processed bytes.
-+       * This is also used to check if we are at the second iteration
-+       * of an update().
-+       */
-+      rctx->len += req->nbytes;
-+
-+      return 0;
-+}
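Editor's note: to make the 64-byte accumulation rules above concrete, the sketch below (illustrative only, not part of the patch) reproduces the same blocking arithmetic for a fresh context. A 150-byte update splits into 64 + 64 + 22: the two full blocks are sent to the engine while the 22-byte tail stays queued with left_last = 64 - 22 = 42 for a later update() or final().

/* Count how many blocks an update of 'nbytes' on a fresh context would
 * push to the engine, mirroring eip93_hash_update(): the trailing block
 * (full or partial) is always held back for a later update()/final().
 */
static unsigned int example_blocks_sent(unsigned int nbytes,
					unsigned int *left_last)
{
	unsigned int sent = 0;

	while (nbytes > 0) {
		unsigned int take = nbytes < 64 ? nbytes : 64;

		nbytes -= take;
		*left_last = 64 - take;
		if (nbytes > 0)
			sent++;
	}

	return sent;
}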
-+
-+void eip93_hash_handle_result(struct crypto_async_request *async, int err)
-+{
-+      struct ahash_request *req = ahash_request_cast(async);
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      int digestsize = crypto_ahash_digestsize(ahash);
-+      struct sa_state *sa_state = rctx->sa_state;
-+      int i;
-+
-+      /* Unmap and sync sa_state for host */
-+      dma_unmap_single(rctx->mtk->dev, rctx->sa_state_base,
-+                       sizeof(*sa_state), DMA_FROM_DEVICE);
-+
-+      /*
-+       * With no_finalize, assume a SHA256_DIGEST_SIZE buffer is passed.
-+       * This handles SHA224, which has a 32-byte intermediate digest.
-+       */
-+      if (rctx->no_finalize)
-+              digestsize = SHA256_DIGEST_SIZE;
-+
-+      /* bytes needs to be swapped for req->result */
-+      if (!IS_HASH_MD5(ctx->flags)) {
-+              for (i = 0; i < digestsize / sizeof(u32); i++) {
-+                      u32 *digest = (u32 *)sa_state->state_i_digest;
-+
-+                      digest[i] = be32_to_cpu(digest[i]);
-+              }
-+      }
-+
-+      memcpy(req->result, sa_state->state_i_digest, digestsize);
-+
-+      kfree(sa_state);
-+      eip93_hash_free_data_blocks(req);
-+      eip93_hash_free_sa_record(req);
-+
-+      ahash_request_complete(req, err);
-+}
-+
-+static int eip93_hash_final(struct ahash_request *req)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+      struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+      struct crypto_async_request *async = &req->base;
-+      struct eip93_device *mtk = rctx->mtk;
-+      struct mkt_hash_block *block;
-+      int ret;
-+
-+      /* EIP93 can't handle a zero-length hash */
-+      if (!rctx->len && !IS_HMAC(ctx->flags)) {
-+              switch ((ctx->flags & EIP93_HASH_MASK)) {
-+              case EIP93_HASH_SHA256:
-+                      memcpy(req->result, sha256_zero_message_hash,
-+                             SHA256_DIGEST_SIZE);
-+                      break;
-+              case EIP93_HASH_SHA224:
-+                      memcpy(req->result, sha224_zero_message_hash,
-+                             SHA224_DIGEST_SIZE);
-+                      break;
-+              case EIP93_HASH_SHA1:
-+                      memcpy(req->result, sha1_zero_message_hash,
-+                             SHA1_DIGEST_SIZE);
-+                      break;
-+              case EIP93_HASH_MD5:
-+                      memcpy(req->result, md5_zero_message_hash,
-+                             MD5_DIGEST_SIZE);
-+                      break;
-+              default: /* Impossible */
-+                      return -EINVAL;
-+              }
-+
-+              eip93_hash_free_sa_state(req);
-+              eip93_hash_free_sa_record(req);
-+
-+              return 0;
-+      }
-+
-+      /* Send last block */
-+      block = list_first_entry(&rctx->blocks, struct mkt_hash_block, list);
-+
-+      block->data_dma = dma_map_single(mtk->dev, block->data,
-+                                       SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
-+      ret = dma_mapping_error(mtk->dev, block->data_dma);
-+      if (ret)
-+              return ret;
-+
-+      eip93_send_hash_req(async, block->data_dma,
-+                          SHA256_BLOCK_SIZE - rctx->left_last,
-+                          true);
-+
-+      return -EINPROGRESS;
-+}
-+
-+static int eip93_hash_finup(struct ahash_request *req)
-+{
-+      int ret;
-+
-+      ret = eip93_hash_update(req);
-+      if (ret)
-+              return ret;
-+
-+      return eip93_hash_final(req);
-+}
-+
-+static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
-+                                u32 keylen)
-+{
-+      unsigned int digestsize = crypto_ahash_digestsize(ahash);
-+      struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
-+      struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct crypto_ahash *ahash_tfm;
-+      struct eip93_hash_reqctx *rctx;
-+      struct scatterlist sg[1];
-+      struct ahash_request *req;
-+      DECLARE_CRYPTO_WAIT(wait);
-+      const char *alg_name;
-+      int i, ret = 0;
-+      u8 *opad;
-+
-+      switch ((ctx->flags & EIP93_HASH_MASK)) {
-+      case EIP93_HASH_SHA256:
-+              alg_name = "sha256-eip93";
-+              break;
-+      case EIP93_HASH_SHA224:
-+              alg_name = "sha224-eip93";
-+              break;
-+      case EIP93_HASH_SHA1:
-+              alg_name = "sha1-eip93";
-+              break;
-+      case EIP93_HASH_MD5:
-+              alg_name = "md5-eip93";
-+              break;
-+      default: /* Impossible */
-+              return -EINVAL;
-+      }
-+
-+      ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
-+      if (IS_ERR(ahash_tfm))
-+              return PTR_ERR(ahash_tfm);
-+
-+      req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
-+      if (!req) {
-+              ret = -ENOMEM;
-+              goto err_ahash;
-+      }
-+
-+      opad = kzalloc(SHA256_BLOCK_SIZE, GFP_KERNEL);
-+      if (!opad) {
-+              ret = -ENOMEM;
-+              goto err_req;
-+      }
-+
-+      rctx = ahash_request_ctx(req);
-+      crypto_init_wait(&wait);
-+      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-+                                 crypto_req_done, &wait);
-+
-+      /* Hash the key if > SHA256_BLOCK_SIZE */
-+      if (keylen > SHA256_BLOCK_SIZE) {
-+              sg_init_one(&sg[0], key, keylen);
-+
-+              ahash_request_set_crypt(req, sg, ctx->ipad, keylen);
-+              ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
-+              if (ret)
-+                      goto exit;
-+
-+              keylen = digestsize;
-+      } else {
-+              memcpy(ctx->ipad, key, keylen);
-+      }
-+
-+      /* Copy to opad */
-+      memset(ctx->ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
-+      memcpy(opad, ctx->ipad, SHA256_BLOCK_SIZE);
-+
-+      /* Pad with HMAC constants */
-+      for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
-+              ctx->ipad[i] ^= HMAC_IPAD_VALUE;
-+              opad[i] ^= HMAC_OPAD_VALUE;
-+      }
-+
-+      sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
-+
-+      /* Hash opad */
-+      ahash_request_set_crypt(req, sg, ctx->opad, SHA256_BLOCK_SIZE);
-+      ret = crypto_ahash_init(req);
-+      if (ret)
-+              goto exit;
-+
-+      /* Disable HASH_FINALIZE for opad hash */
-+      rctx->no_finalize = true;
-+
-+      ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
-+      if (ret)
-+              goto exit;
-+
-+      if (!IS_HASH_MD5(ctx->flags)) {
-+              u32 *opad_hash = (u32 *)ctx->opad;
-+
-+              for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
-+                      opad_hash[i] = cpu_to_be32(opad_hash[i]);
-+      }
-+
-+exit:
-+      kfree(opad);
-+err_req:
-+      ahash_request_free(req);
-+err_ahash:
-+      crypto_free_ahash(ahash_tfm);
-+
-+      return ret;
-+}
-+
-+static int eip93_hash_cra_init(struct crypto_tfm *tfm)
-+{
-+      struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-+      struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
-+                              struct eip93_alg_template, alg.ahash.halg.base);
-+
-+      crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-+                               sizeof(struct eip93_hash_reqctx));
-+
-+      ctx->mtk = tmpl->mtk;
-+      ctx->flags = tmpl->flags;
-+
-+      return 0;
-+}
-+
-+static int eip93_hash_digest(struct ahash_request *req)
-+{
-+      int ret;
-+
-+      ret = eip93_hash_init(req);
-+      if (ret)
-+              return ret;
-+
-+      return eip93_hash_finup(req);
-+}
-+
-+static int eip93_hash_import(struct ahash_request *req, const void *in)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      const struct eip93_hash_export_state *state = in;
-+      int ret;
-+
-+      ret = _eip93_hash_init(req, state->sa_state, state->sa_state_base);
-+      if (ret)
-+              goto err;
-+
-+      rctx->len = state->len;
-+      rctx->left_last = state->left_last;
-+      memcpy(&rctx->blocks, &state->blocks, sizeof(rctx->blocks));
-+
-+      return 0;
-+err:
-+      eip93_hash_free_data_blocks(req);
-+      eip93_hash_free_sa_state(req);
-+      return ret;
-+}
-+
-+static int eip93_hash_export(struct ahash_request *req, void *out)
-+{
-+      struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
-+      struct eip93_hash_export_state *state = out;
-+
-+      state->sa_state = rctx->sa_state;
-+      state->sa_state_base = rctx->sa_state_base;
-+      state->len = rctx->len;
-+      state->left_last = rctx->left_last;
-+      memcpy(&state->blocks, &rctx->blocks, sizeof(rctx->blocks));
-+
-+      return 0;
-+}
-+
-+struct eip93_alg_template eip93_alg_md5 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_MD5,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = MD5_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "md5",
-+                              .cra_driver_name = "md5-eip93",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_sha1 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_SHA1,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA1_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "sha1",
-+                              .cra_driver_name = "sha1-eip93",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA1_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_sha224 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_SHA224,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA224_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "sha224",
-+                              .cra_driver_name = "sha224-eip93",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA224_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_sha256 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_SHA256,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA256_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "sha256",
-+                              .cra_driver_name = "sha256-eip93",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA256_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_hmac_md5 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .setkey = eip93_hash_hmac_setkey,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = MD5_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "hmac(md5)",
-+                              .cra_driver_name = "hmac(md5-eip93)",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_hmac_sha1 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .setkey = eip93_hash_hmac_setkey,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA1_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "hmac(sha1)",
-+                              .cra_driver_name = "hmac(sha1-eip93)",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA1_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_hmac_sha224 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .setkey = eip93_hash_hmac_setkey,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA224_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "hmac(sha224)",
-+                              .cra_driver_name = "hmac(sha224-eip93)",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA224_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
-+
-+struct eip93_alg_template eip93_alg_hmac_sha256 = {
-+      .type = EIP93_ALG_TYPE_HASH,
-+      .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
-+      .alg.ahash = {
-+              .init = eip93_hash_init,
-+              .update = eip93_hash_update,
-+              .final = eip93_hash_final,
-+              .finup = eip93_hash_finup,
-+              .digest = eip93_hash_digest,
-+              .setkey = eip93_hash_hmac_setkey,
-+              .export = eip93_hash_export,
-+              .import = eip93_hash_import,
-+              .halg = {
-+                      .digestsize = SHA256_DIGEST_SIZE,
-+                      .statesize = sizeof(struct eip93_hash_export_state),
-+                      .base = {
-+                              .cra_name = "hmac(sha256)",
-+                              .cra_driver_name = "hmac(sha256-eip93)",
-+                              .cra_priority = 300,
-+                              .cra_flags = CRYPTO_ALG_ASYNC |
-+                                              CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                                              CRYPTO_ALG_ALLOCATES_MEMORY,
-+                              .cra_blocksize = SHA256_BLOCK_SIZE,
-+                              .cra_ctxsize = sizeof(struct eip93_hash_ctx),
-+                              .cra_init = eip93_hash_cra_init,
-+                              .cra_module = THIS_MODULE,
-+                      },
-+              },
-+      },
-+};
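Editor's note: once registered, the templates above are reached through the normal kernel ahash API. The fragment below is a minimal, illustrative consumer sketch and not part of the patch; example_hmac_sha256() is a made-up helper, and whether the eip93 backend or another implementation is selected depends on the priorities of whatever "hmac(sha256)" providers are registered.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       const u8 *msg, unsigned int msglen, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	/* msg must be linearly mapped (e.g. kmalloc'ed), not on the stack */
	sg_init_one(&sg, msg, msglen);
	ahash_request_set_crypt(req, &sg, out, msglen);

	/* -EINPROGRESS from an async driver is resolved by crypto_wait_req() */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}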
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h
-@@ -0,0 +1,72 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_HASH_H_
-+#define _EIP93_HASH_H_
-+
-+#include <crypto/sha2.h>
-+
-+#include "eip93-main.h"
-+
-+struct eip93_hash_ctx {
-+      struct eip93_device     *mtk;
-+      u32                     flags;
-+
-+      u8                      ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
-+      u8                      opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
-+};
-+
-+struct eip93_hash_reqctx {
-+      struct eip93_device     *mtk;
-+
-+      struct sa_record        *sa_record;
-+      dma_addr_t              sa_record_base;
-+
-+      struct sa_record        *sa_record_hmac;
-+      dma_addr_t              sa_record_hmac_base;
-+
-+      struct sa_state         *sa_state;
-+      dma_addr_t              sa_state_base;
-+
-+      /* Don't enable HASH_FINALIZE when last block is sent */
-+      bool                    no_finalize;
-+
-+      /*
-+       * EIP93 requires data to be accumulated in blocks of 64 bytes
-+       * for intermediate hash calculation.
-+       */
-+      u64                     len;
-+      u32                     left_last;
-+      struct list_head        blocks;
-+};
-+
-+struct mkt_hash_block {
-+      struct list_head        list;
-+      u8                      data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
-+      dma_addr_t              data_dma;
-+};
-+
-+struct eip93_hash_export_state {
-+      u64                     len;
-+      u32                     left_last;
-+      struct sa_state         *sa_state;
-+      dma_addr_t              sa_state_base;
-+      struct list_head        blocks;
-+};
-+
-+void eip93_hash_handle_result(struct crypto_async_request *async, int err);
-+
-+extern struct eip93_alg_template eip93_alg_md5;
-+extern struct eip93_alg_template eip93_alg_sha1;
-+extern struct eip93_alg_template eip93_alg_sha224;
-+extern struct eip93_alg_template eip93_alg_sha256;
-+extern struct eip93_alg_template eip93_alg_hmac_md5;
-+extern struct eip93_alg_template eip93_alg_hmac_sha1;
-+extern struct eip93_alg_template eip93_alg_hmac_sha224;
-+extern struct eip93_alg_template eip93_alg_hmac_sha256;
-+
-+#endif /* _EIP93_HASH_H_ */
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
-@@ -0,0 +1,502 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+
-+#include <linux/atomic.h>
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/spinlock.h>
-+#include <crypto/aes.h>
-+#include <crypto/ctr.h>
-+
-+#include "eip93-main.h"
-+#include "eip93-regs.h"
-+#include "eip93-common.h"
-+#include "eip93-cipher.h"
-+#include "eip93-aes.h"
-+#include "eip93-des.h"
-+#include "eip93-aead.h"
-+#include "eip93-hash.h"
-+
-+static struct eip93_alg_template *eip93_algs[] = {
-+      &eip93_alg_ecb_des,
-+      &eip93_alg_cbc_des,
-+      &eip93_alg_ecb_des3_ede,
-+      &eip93_alg_cbc_des3_ede,
-+      &eip93_alg_ecb_aes,
-+      &eip93_alg_cbc_aes,
-+      &eip93_alg_ctr_aes,
-+      &eip93_alg_rfc3686_aes,
-+      &eip93_alg_authenc_hmac_md5_cbc_des,
-+      &eip93_alg_authenc_hmac_sha1_cbc_des,
-+      &eip93_alg_authenc_hmac_sha224_cbc_des,
-+      &eip93_alg_authenc_hmac_sha256_cbc_des,
-+      &eip93_alg_authenc_hmac_md5_cbc_des3_ede,
-+      &eip93_alg_authenc_hmac_sha1_cbc_des3_ede,
-+      &eip93_alg_authenc_hmac_sha224_cbc_des3_ede,
-+      &eip93_alg_authenc_hmac_sha256_cbc_des3_ede,
-+      &eip93_alg_authenc_hmac_md5_cbc_aes,
-+      &eip93_alg_authenc_hmac_sha1_cbc_aes,
-+      &eip93_alg_authenc_hmac_sha224_cbc_aes,
-+      &eip93_alg_authenc_hmac_sha256_cbc_aes,
-+      &eip93_alg_authenc_hmac_md5_rfc3686_aes,
-+      &eip93_alg_authenc_hmac_sha1_rfc3686_aes,
-+      &eip93_alg_authenc_hmac_sha224_rfc3686_aes,
-+      &eip93_alg_authenc_hmac_sha256_rfc3686_aes,
-+      &eip93_alg_md5,
-+      &eip93_alg_sha1,
-+      &eip93_alg_sha224,
-+      &eip93_alg_sha256,
-+      &eip93_alg_hmac_md5,
-+      &eip93_alg_hmac_sha1,
-+      &eip93_alg_hmac_sha224,
-+      &eip93_alg_hmac_sha256,
-+};
-+
-+inline void eip93_irq_disable(struct eip93_device *mtk, u32 mask)
-+{
-+      __raw_writel(mask, mtk->base + EIP93_REG_MASK_DISABLE);
-+}
-+
-+inline void eip93_irq_enable(struct eip93_device *mtk, u32 mask)
-+{
-+      __raw_writel(mask, mtk->base + EIP93_REG_MASK_ENABLE);
-+}
-+
-+inline void eip93_irq_clear(struct eip93_device *mtk, u32 mask)
-+{
-+      __raw_writel(mask, mtk->base + EIP93_REG_INT_CLR);
-+}
-+
-+static void eip93_unregister_algs(unsigned int i)
-+{
-+      unsigned int j;
-+
-+      for (j = 0; j < i; j++) {
-+              switch (eip93_algs[j]->type) {
-+              case EIP93_ALG_TYPE_SKCIPHER:
-+                      crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
-+                      break;
-+              case EIP93_ALG_TYPE_AEAD:
-+                      crypto_unregister_aead(&eip93_algs[j]->alg.aead);
-+                      break;
-+              case EIP93_ALG_TYPE_HASH:
-+                      crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
-+                      break;
-+              }
-+      }
-+}
-+
-+static int eip93_register_algs(struct eip93_device *mtk, u32 supported_algo_flags)
-+{
-+      unsigned int i;
-+      int ret = 0;
-+
-+      for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) {
-+              u32 alg_flags = eip93_algs[i]->flags;
-+
-+              eip93_algs[i]->mtk = mtk;
-+
-+              if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
-+                  !(supported_algo_flags & EIP93_PE_OPTION_TDES))
-+                      continue;
-+
-+              if (IS_AES(alg_flags)) {
-+                      if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
-+                              continue;
-+
-+                      if (!IS_HMAC(alg_flags)) {
-+                              if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
-+                                      eip93_algs[i]->alg.skcipher.max_keysize =
-+                                              AES_KEYSIZE_128;
-+
-+                              if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
-+                                      eip93_algs[i]->alg.skcipher.max_keysize =
-+                                              AES_KEYSIZE_192;
-+
-+                              if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
-+                                      eip93_algs[i]->alg.skcipher.max_keysize =
-+                                              AES_KEYSIZE_256;
-+
-+                              if (IS_RFC3686(alg_flags))
-+                                      eip93_algs[i]->alg.skcipher.max_keysize +=
-+                                              CTR_RFC3686_NONCE_SIZE;
-+                      }
-+              }
-+
-+              if (IS_HASH_MD5(alg_flags) &&
-+                  !(supported_algo_flags & EIP93_PE_OPTION_MD5))
-+                      continue;
-+
-+              if (IS_HASH_SHA1(alg_flags) &&
-+                  !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
-+                      continue;
-+
-+              if (IS_HASH_SHA224(alg_flags) &&
-+                  !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
-+                      continue;
-+
-+              if (IS_HASH_SHA256(alg_flags) &&
-+                  !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
-+                      continue;
-+
-+              switch (eip93_algs[i]->type) {
-+              case EIP93_ALG_TYPE_SKCIPHER:
-+                      ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
-+                      break;
-+              case EIP93_ALG_TYPE_AEAD:
-+                      ret = crypto_register_aead(&eip93_algs[i]->alg.aead);
-+                      break;
-+              case EIP93_ALG_TYPE_HASH:
-+                      ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash);
-+                      break;
-+              }
-+              if (ret)
-+                      goto fail;
-+      }
-+
-+      return 0;
-+
-+fail:
-+      eip93_unregister_algs(i);
-+
-+      return ret;
-+}
-+
-+static void eip93_handle_result_descriptor(struct eip93_device *mtk)
-+{
-+      struct crypto_async_request *async;
-+      struct eip93_descriptor *rdesc;
-+      u16 desc_flags, crypto_idr;
-+      bool last_entry;
-+      int handled, left, err;
-+      u32 pe_ctrl_stat;
-+      u32 pe_length;
-+
-+get_more:
-+      handled = 0;
-+
-+      left = readl(mtk->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;
-+
-+      if (!left) {
-+              eip93_irq_clear(mtk, EIP93_INT_RDR_THRESH);
-+              eip93_irq_enable(mtk, EIP93_INT_RDR_THRESH);
-+              return;
-+      }
-+
-+      last_entry = false;
-+
-+      while (left) {
-+              rdesc = eip93_get_descriptor(mtk);
-+              if (IS_ERR(rdesc)) {
-+                      dev_err(mtk->dev, "Ndesc: %d nreq: %d\n",
-+                              handled, left);
-+                      err = -EIO;
-+                      break;
-+              }
-+              /* make sure DMA is finished writing */
-+              do {
-+                      pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
-+                      pe_length = READ_ONCE(rdesc->pe_length_word);
-+              } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
-+                       EIP93_PE_CTRL_PE_READY ||
-+                       FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
-+                       EIP93_PE_LENGTH_PE_READY);
-+
-+              err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
-+                                                EIP93_PE_CTRL_PE_EXT_ERR |
-+                                                EIP93_PE_CTRL_PE_SEQNUM_ERR |
-+                                                EIP93_PE_CTRL_PE_PAD_ERR |
-+                                                EIP93_PE_CTRL_PE_AUTH_ERR);
-+
-+              desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
-+              crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);
-+
-+              writel(1, mtk->base + EIP93_REG_PE_RD_COUNT);
-+              eip93_irq_clear(mtk, EIP93_INT_RDR_THRESH);
-+
-+              handled++;
-+              left--;
-+
-+              if (desc_flags & EIP93_DESC_LAST) {
-+                      last_entry = true;
-+                      break;
-+              }
-+      }
-+
-+      if (!last_entry)
-+              goto get_more;
-+
-+      /* Get crypto async ref only for last descriptor */
-+      scoped_guard(spinlock_bh, &mtk->ring->idr_lock) {
-+              async = idr_find(&mtk->ring->crypto_async_idr, crypto_idr);
-+              idr_remove(&mtk->ring->crypto_async_idr, crypto_idr);
-+      }
-+
-+      /* Parse error in ctrl stat word */
-+      err = eip93_parse_ctrl_stat_err(mtk, err);
-+
-+      if (desc_flags & EIP93_DESC_SKCIPHER)
-+              eip93_skcipher_handle_result(async, err);
-+
-+      if (desc_flags & EIP93_DESC_AEAD)
-+              eip93_aead_handle_result(async, err);
-+
-+      if (desc_flags & EIP93_DESC_HASH)
-+              eip93_hash_handle_result(async, err);
-+
-+      goto get_more;
-+}
-+
-+static void eip93_done_task(unsigned long data)
-+{
-+      struct eip93_device *mtk = (struct eip93_device *)data;
-+
-+      eip93_handle_result_descriptor(mtk);
-+}
-+
-+static irqreturn_t eip93_irq_handler(int irq, void *data)
-+{
-+      struct eip93_device *mtk = data;
-+      u32 irq_status;
-+
-+      irq_status = readl(mtk->base + EIP93_REG_INT_MASK_STAT);
-+      if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
-+              eip93_irq_disable(mtk, EIP93_INT_RDR_THRESH);
-+              tasklet_schedule(&mtk->ring->done_task);
-+              return IRQ_HANDLED;
-+      }
-+
-+      /* Ignore errors in AUTO mode, handled by the RDR */
-+      eip93_irq_clear(mtk, irq_status);
-+      if (irq_status)
-+              eip93_irq_disable(mtk, irq_status);
-+
-+      return IRQ_NONE;
-+}
-+
-+static void eip93_initialize(struct eip93_device *mtk, u32 supported_algo_flags)
-+{
-+      u32 val;
-+
-+      /* Reset PE and rings */
-+      val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
-+      val |= EIP93_PE_TARGET_AUTO_RING_MODE;
-+      /* For Auto mode, update the CDR ring owner after processing */
-+      val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
-+      writel(val, mtk->base + EIP93_REG_PE_CONFIG);
-+
-+      /* Wait for PE and ring to reset */
-+      usleep_range(10, 20);
-+
-+      /* Release PE and ring reset */
-+      val = readl(mtk->base + EIP93_REG_PE_CONFIG);
-+      val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
-+      writel(val, mtk->base + EIP93_REG_PE_CONFIG);
-+
-+      /* Config Clocks */
-+      val = EIP93_PE_CLOCK_EN_PE_CLK;
-+      if (supported_algo_flags & EIP93_PE_OPTION_TDES)
-+              val |= EIP93_PE_CLOCK_EN_DES_CLK;
-+      if (supported_algo_flags & EIP93_PE_OPTION_AES)
-+              val |= EIP93_PE_CLOCK_EN_AES_CLK;
-+      if (supported_algo_flags &
-+          (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
-+           EIP93_PE_OPTION_SHA_256))
-+              val |= EIP93_PE_CLOCK_EN_HASH_CLK;
-+      writel(val, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
-+
-+      /* Config DMA thresholds */
-+      val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
-+            FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
-+      writel(val, mtk->base + EIP93_REG_PE_BUF_THRESH);
-+
-+      /* Clear/ack all interrupts before disable all */
-+      eip93_irq_clear(mtk, EIP93_INT_ALL);
-+      eip93_irq_disable(mtk, EIP93_INT_ALL);
-+
-+      /* Set up the CDR threshold to trigger the interrupt */
-+      val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
-+      /*
-+       * Configure RDR interrupt to be triggered if RD counter is not 0
-+       * for more than 2^(N+10) system clocks.
-+       */
-+      val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN;
-+      writel(val, mtk->base + EIP93_REG_PE_RING_THRESH);
-+}
-+
-+static void eip93_desc_free(struct eip93_device *mtk)
-+{
-+      writel(0, mtk->base + EIP93_REG_PE_RING_CONFIG);
-+      writel(0, mtk->base + EIP93_REG_PE_CDR_BASE);
-+      writel(0, mtk->base + EIP93_REG_PE_RDR_BASE);
-+}
-+
-+static int eip93_set_ring(struct eip93_device *mtk, struct eip93_desc_ring *ring)
-+{
-+      ring->offset = sizeof(struct eip93_descriptor);
-+      ring->base = dmam_alloc_coherent(mtk->dev,
-+                                       sizeof(struct eip93_descriptor) * EIP93_RING_NUM,
-+                                       &ring->base_dma, GFP_KERNEL);
-+      if (!ring->base)
-+              return -ENOMEM;
-+
-+      ring->write = ring->base;
-+      ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1);
-+      ring->read  = ring->base;
-+
-+      return 0;
-+}
-+
-+static int eip93_desc_init(struct eip93_device *mtk)
-+{
-+      struct eip93_desc_ring *cdr = &mtk->ring->cdr;
-+      struct eip93_desc_ring *rdr = &mtk->ring->rdr;
-+      int ret;
-+      u32 val;
-+
-+      ret = eip93_set_ring(mtk, cdr);
-+      if (ret)
-+              return ret;
-+
-+      ret = eip93_set_ring(mtk, rdr);
-+      if (ret)
-+              return ret;
-+
-+      writel((u32 __force)cdr->base_dma, mtk->base + EIP93_REG_PE_CDR_BASE);
-+      writel((u32 __force)rdr->base_dma, mtk->base + EIP93_REG_PE_RDR_BASE);
-+
-+      val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1);
-+      writel(val, mtk->base + EIP93_REG_PE_RING_CONFIG);
-+
-+      atomic_set(&mtk->ring->free, EIP93_RING_NUM - 1);
-+
-+      return 0;
-+}
-+
-+static void eip93_cleanup(struct eip93_device *mtk)
-+{
-+      tasklet_kill(&mtk->ring->done_task);
-+
-+      /* Clear/ack all interrupts before disable all */
-+      eip93_irq_clear(mtk, EIP93_INT_ALL);
-+      eip93_irq_disable(mtk, EIP93_INT_ALL);
-+
-+      writel(0, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
-+
-+      eip93_desc_free(mtk);
-+
-+      idr_destroy(&mtk->ring->crypto_async_idr);
-+}
-+
-+static int eip93_crypto_probe(struct platform_device *pdev)
-+{
-+      struct device *dev = &pdev->dev;
-+      struct eip93_device *mtk;
-+      u32 ver, algo_flags;
-+      int ret;
-+
-+      mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
-+      if (!mtk)
-+              return -ENOMEM;
-+
-+      mtk->dev = dev;
-+      platform_set_drvdata(pdev, mtk);
-+
-+      mtk->base = devm_platform_ioremap_resource(pdev, 0);
-+      if (IS_ERR(mtk->base))
-+              return PTR_ERR(mtk->base);
-+
-+      mtk->irq = platform_get_irq(pdev, 0);
-+      if (mtk->irq < 0)
-+              return mtk->irq;
-+
-+      ret = devm_request_threaded_irq(mtk->dev, mtk->irq, eip93_irq_handler,
-+                                      NULL, IRQF_ONESHOT,
-+                                      dev_name(mtk->dev), mtk);
-+      if (ret)
-+              return ret;
-+
-+      mtk->ring = devm_kcalloc(mtk->dev, 1, sizeof(*mtk->ring), GFP_KERNEL);
-+      if (!mtk->ring)
-+              return -ENOMEM;
-+
-+      ret = eip93_desc_init(mtk);
-+      if (ret)
-+              return ret;
-+
-+      tasklet_init(&mtk->ring->done_task, eip93_done_task, (unsigned long)mtk);
-+
-+      spin_lock_init(&mtk->ring->read_lock);
-+      spin_lock_init(&mtk->ring->write_lock);
-+
-+      spin_lock_init(&mtk->ring->idr_lock);
-+      idr_init(&mtk->ring->crypto_async_idr);
-+
-+      algo_flags = readl(mtk->base + EIP93_REG_PE_OPTION_1);
-+
-+      eip93_initialize(mtk, algo_flags);
-+
-+      /* Init finished, enable RDR interrupt */
-+      eip93_irq_enable(mtk, EIP93_INT_RDR_THRESH);
-+
-+      ret = eip93_register_algs(mtk, algo_flags);
-+      if (ret) {
-+              eip93_cleanup(mtk);
-+              return ret;
-+      }
-+
-+      ver = readl(mtk->base + EIP93_REG_PE_REVISION);
-+      /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */
-+      dev_info(mtk->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n",
-+               FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver),
-+               FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver),
-+               FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver),
-+               FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver),
-+               algo_flags,
-+               readl(mtk->base + EIP93_REG_PE_OPTION_0));
-+
-+      return 0;
-+}
-+
-+static void eip93_crypto_remove(struct platform_device *pdev)
-+{
-+      struct eip93_device *mtk = platform_get_drvdata(pdev);
-+
-+      eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
-+      eip93_cleanup(mtk);
-+}
-+
-+static const struct of_device_id eip93_crypto_of_match[] = {
-+      { .compatible = "inside-secure,safexcel-eip93i", },
-+      { .compatible = "inside-secure,safexcel-eip93ie", },
-+      { .compatible = "inside-secure,safexcel-eip93is", },
-+      { .compatible = "inside-secure,safexcel-eip93ies", },
-+      /* IW not supported currently, missing AES-XCBC-MAC/AES-CCM */
-+      /* { .compatible = "inside-secure,safexcel-eip93iw", }, */
-+      {}
-+};
-+MODULE_DEVICE_TABLE(of, eip93_crypto_of_match);
-+
-+static struct platform_driver eip93_crypto_driver = {
-+      .probe = eip93_crypto_probe,
-+      .remove_new = eip93_crypto_remove,
-+      .driver = {
-+              .name = "mtk-eip93",
-+              .of_match_table = eip93_crypto_of_match,
-+      },
-+};
-+module_platform_driver(eip93_crypto_driver);
-+
-+MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
-+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
-+MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
-+MODULE_LICENSE("GPL");
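Editor's note: the result path above recovers the originating request by unpacking the user_id word that eip93_send_hash_req() (and the cipher/AEAD equivalents) packed with the idr handle and descriptor flags. The sketch below only illustrates that round trip and is not part of the patch; the EXAMPLE_* masks are assumptions for illustration, the authoritative EIP93_PE_USER_ID_* layout lives in eip93-regs.h.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Assumed field layout, for illustration only */
#define EXAMPLE_USER_ID_CRYPTO_IDR	GENMASK(15, 0)
#define EXAMPLE_USER_ID_DESC_FLAGS	GENMASK(31, 16)

/* Tag a command descriptor with the idr handle of its async request. */
static u32 example_pack_user_id(u16 crypto_idr, u16 desc_flags)
{
	return FIELD_PREP(EXAMPLE_USER_ID_CRYPTO_IDR, crypto_idr) |
	       FIELD_PREP(EXAMPLE_USER_ID_DESC_FLAGS, desc_flags);
}

/* Recover the handle and flags from a result descriptor. */
static void example_unpack_user_id(u32 user_id, u16 *crypto_idr,
				   u16 *desc_flags)
{
	*crypto_idr = FIELD_GET(EXAMPLE_USER_ID_CRYPTO_IDR, user_id);
	*desc_flags = FIELD_GET(EXAMPLE_USER_ID_DESC_FLAGS, user_id);
}

An idr_find() on the recovered handle then yields the crypto_async_request to complete, which is what eip93_handle_result_descriptor() does for the EIP93_DESC_LAST entry.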
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-main.h
-@@ -0,0 +1,155 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef _EIP93_MAIN_H_
-+#define _EIP93_MAIN_H_
-+
-+#include <crypto/internal/aead.h>
-+#include <crypto/internal/hash.h>
-+#include <crypto/internal/rng.h>
-+#include <crypto/internal/skcipher.h>
-+#include <linux/device.h>
-+#include <linux/interrupt.h>
-+
-+#include "eip93-regs.h"
-+
-+#define EIP93_RING_BUSY_DELAY         500
-+
-+#define EIP93_RING_NUM                        512
-+#define EIP93_RING_BUSY                       32
-+#define EIP93_CRA_PRIORITY            1500
-+
-+#define EIP93_RING_SA_STATE_ADDR(base, idx)   ((base) + (idx))
-+#define EIP93_RING_SA_STATE_DMA(dma_base, idx)        ((u32 __force)(dma_base) + \
-+                                               ((idx) * sizeof(struct sa_state)))
-+
-+/* cipher algorithms */
-+#define EIP93_ALG_DES                 BIT(0)
-+#define EIP93_ALG_3DES                        BIT(1)
-+#define EIP93_ALG_AES                 BIT(2)
-+#define EIP93_ALG_MASK                        GENMASK(2, 0)
-+/* hash and hmac algorithms */
-+#define EIP93_HASH_MD5                        BIT(3)
-+#define EIP93_HASH_SHA1                       BIT(4)
-+#define EIP93_HASH_SHA224                     BIT(5)
-+#define EIP93_HASH_SHA256                     BIT(6)
-+#define EIP93_HASH_HMAC                       BIT(7)
-+#define EIP93_HASH_MASK                       GENMASK(6, 3)
-+/* cipher modes */
-+#define EIP93_MODE_CBC                        BIT(8)
-+#define EIP93_MODE_ECB                        BIT(9)
-+#define EIP93_MODE_CTR                        BIT(10)
-+#define EIP93_MODE_RFC3686            BIT(11)
-+#define EIP93_MODE_MASK                       GENMASK(10, 8)
-+
-+/* cipher encryption/decryption operations */
-+#define EIP93_ENCRYPT                 BIT(12)
-+#define EIP93_DECRYPT                 BIT(13)
-+
-+#define EIP93_BUSY                    BIT(14)
-+
-+/* descriptor flags */
-+#define EIP93_DESC_DMA_IV                     BIT(0)
-+#define EIP93_DESC_IPSEC                      BIT(1)
-+#define EIP93_DESC_FINISH                     BIT(2)
-+#define EIP93_DESC_LAST                       BIT(3)
-+#define EIP93_DESC_FAKE_HMAC          BIT(4)
-+#define EIP93_DESC_PRNG                       BIT(5)
-+#define EIP93_DESC_HASH                       BIT(6)
-+#define EIP93_DESC_AEAD                       BIT(7)
-+#define EIP93_DESC_SKCIPHER           BIT(8)
-+#define EIP93_DESC_ASYNC                      BIT(9)
-+
-+#define IS_DMA_IV(desc_flags)         ((desc_flags) & EIP93_DESC_DMA_IV)
-+
-+#define IS_DES(flags)                 ((flags) & EIP93_ALG_DES)
-+#define IS_3DES(flags)                        ((flags) & EIP93_ALG_3DES)
-+#define IS_AES(flags)                 ((flags) & EIP93_ALG_AES)
-+
-+#define IS_HASH_MD5(flags)            ((flags) & EIP93_HASH_MD5)
-+#define IS_HASH_SHA1(flags)           ((flags) & EIP93_HASH_SHA1)
-+#define IS_HASH_SHA224(flags)         ((flags) & EIP93_HASH_SHA224)
-+#define IS_HASH_SHA256(flags)         ((flags) & EIP93_HASH_SHA256)
-+#define IS_HMAC(flags)                        ((flags) & EIP93_HASH_HMAC)
-+
-+#define IS_CBC(mode)                  ((mode) & EIP93_MODE_CBC)
-+#define IS_ECB(mode)                  ((mode) & EIP93_MODE_ECB)
-+#define IS_CTR(mode)                  ((mode) & EIP93_MODE_CTR)
-+#define IS_RFC3686(mode)              ((mode) & EIP93_MODE_RFC3686)
-+
-+#define IS_BUSY(flags)                        ((flags) & EIP93_BUSY)
-+
-+#define IS_ENCRYPT(dir)                       ((dir) & EIP93_ENCRYPT)
-+#define IS_DECRYPT(dir)                       ((dir) & EIP93_DECRYPT)
-+
-+#define IS_CIPHER(flags)              ((flags) & (EIP93_ALG_DES | \
-+                                                  EIP93_ALG_3DES |  \
-+                                                  EIP93_ALG_AES))
-+
-+#define IS_HASH(flags)                        ((flags) & (EIP93_HASH_MD5 |  \
-+                                                  EIP93_HASH_SHA1 |   \
-+                                                  EIP93_HASH_SHA224 | \
-+                                                  EIP93_HASH_SHA256))
-+
-+/**
-+ * struct eip93_device - crypto engine device structure
-+ */
-+struct eip93_device {
-+      void __iomem            *base;
-+      struct device           *dev;
-+      struct clk              *clk;
-+      int                     irq;
-+      struct eip93_ring               *ring;
-+};
-+
-+struct eip93_desc_ring {
-+      void                    *base;
-+      void                    *base_end;
-+      dma_addr_t              base_dma;
-+      /* write and read pointers */
-+      void                    *read;
-+      void                    *write;
-+      /* descriptor element offset */
-+      u32                     offset;
-+};
-+
-+struct eip93_state_pool {
-+      void                    *base;
-+      dma_addr_t              base_dma;
-+};
-+
-+struct eip93_ring {
-+      struct tasklet_struct           done_task;
-+      /* command/result rings */
-+      struct eip93_desc_ring          cdr;
-+      struct eip93_desc_ring          rdr;
-+      spinlock_t                      write_lock;
-+      spinlock_t                      read_lock;
-+      atomic_t                        free;
-+      /* async idr */
-+      spinlock_t                      idr_lock;
-+      struct idr                      crypto_async_idr;
-+};
-+
-+enum eip93_alg_type {
-+      EIP93_ALG_TYPE_AEAD,
-+      EIP93_ALG_TYPE_SKCIPHER,
-+      EIP93_ALG_TYPE_HASH,
-+};
-+
-+struct eip93_alg_template {
-+      struct eip93_device     *mtk;
-+      enum eip93_alg_type     type;
-+      u32                     flags;
-+      union {
-+              struct aead_alg         aead;
-+              struct skcipher_alg     skcipher;
-+              struct ahash_alg        ahash;
-+      } alg;
-+};
-+
-+#endif /* _EIP93_MAIN_H_ */
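The EIP93_ALG_*/EIP93_MODE_* bits above are how an eip93_alg_template describes an algorithm to the rest of the driver: one algorithm bit, one mode bit, plus a direction. Below is a small standalone sketch of composing and testing such a flag word with the IS_*() helpers; the AES-CBC-encrypt combination is only an illustrative assumption, and the constants simply mirror the defines above.

/* Sketch only: flag values copied from the eip93-main.h defines above. */
#include <stdio.h>

#define EIP93_ALG_AES   (1u << 2)
#define EIP93_MODE_CBC  (1u << 8)
#define EIP93_ENCRYPT   (1u << 12)

#define IS_AES(f)      ((f) & EIP93_ALG_AES)
#define IS_CBC(f)      ((f) & EIP93_MODE_CBC)
#define IS_ENCRYPT(f)  ((f) & EIP93_ENCRYPT)

int main(void)
{
	unsigned int flags = EIP93_ALG_AES | EIP93_MODE_CBC | EIP93_ENCRYPT;

	printf("aes=%d cbc=%d enc=%d\n",
	       !!IS_AES(flags), !!IS_CBC(flags), !!IS_ENCRYPT(flags));
	return 0;
}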
---- /dev/null
-+++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h
-@@ -0,0 +1,335 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Copyright (C) 2019 - 2021
-+ *
-+ * Richard van Schagen <vschagen@icloud.com>
-+ * Christian Marangi <ansuelsmth@gmail.com>
-+ */
-+#ifndef REG_EIP93_H
-+#define REG_EIP93_H
-+
-+#define EIP93_REG_PE_CTRL_STAT                        0x0
-+#define   EIP93_PE_CTRL_PE_PAD_CTRL_STAT      GENMASK(31, 24)
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_CODE               GENMASK(23, 20)
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH        0x5
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_SPI                0x4
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1
-+#define   EIP93_PE_CTRL_PE_EXT_ERR_BUS                0x0
-+#define   EIP93_PE_CTRL_PE_EXT_ERR            BIT(19)
-+#define   EIP93_PE_CTRL_PE_SEQNUM_ERR         BIT(18)
-+#define   EIP93_PE_CTRL_PE_PAD_ERR            BIT(17)
-+#define   EIP93_PE_CTRL_PE_AUTH_ERR           BIT(16)
-+#define   EIP93_PE_CTRL_PE_PAD_VALUE          GENMASK(15, 8)
-+#define   EIP93_PE_CTRL_PE_PRNG_MODE          GENMASK(7, 6)
-+#define   EIP93_PE_CTRL_PE_HASH_FINAL         BIT(4)
-+#define   EIP93_PE_CTRL_PE_INIT_ARC4          BIT(3)
-+#define   EIP93_PE_CTRL_PE_READY_DES_TRING_OWN        GENMASK(1, 0)
-+#define   EIP93_PE_CTRL_PE_READY              0x2
-+#define   EIP93_PE_CTRL_HOST_READY            0x1
-+#define EIP93_REG_PE_SOURCE_ADDR              0x4
-+#define EIP93_REG_PE_DEST_ADDR                        0x8
-+#define EIP93_REG_PE_SA_ADDR                  0xc
-+#define EIP93_REG_PE_ADDR                     0x10 /* STATE_ADDR */
-+/*
-+ * Special implementation for the user ID
-+ * user_id in eip93_descriptor identifies the descriptor; it is
-+ * opaque to the hardware and can be used by the driver in a
-+ * custom way.
-+ *
-+ * The natural use would be to store the address of the crypto
-+ * request struct from the kernel, but that cannot work on 64-bit
-+ * systems.
-+ *
-+ * Some flags are also required to identify the last descriptor.
-+ *
-+ * To handle this, the u32 is split in two parts:
-+ * - 31:16 descriptor flags
-+ * - 15:0  IDR index connecting back to the crypto request address
-+ */
-+#define EIP93_REG_PE_USER_ID                  0x18
-+#define   EIP93_PE_USER_ID_DESC_FLAGS         GENMASK(31, 16)
-+#define   EIP93_PE_USER_ID_CRYPTO_IDR         GENMASK(15, 0)
-+#define EIP93_REG_PE_LENGTH                   0x1c
-+#define   EIP93_PE_LENGTH_BYPASS              GENMASK(31, 24)
-+#define   EIP93_PE_LENGTH_HOST_PE_READY               GENMASK(23, 22)
-+#define   EIP93_PE_LENGTH_PE_READY            0x2
-+#define   EIP93_PE_LENGTH_HOST_READY          0x1
-+#define   EIP93_PE_LENGTH_LENGTH              GENMASK(19, 0)
-+
-+/* PACKET ENGINE RING configuration registers */
-+#define EIP93_REG_PE_CDR_BASE                 0x80
-+#define EIP93_REG_PE_RDR_BASE                 0x84
-+#define EIP93_REG_PE_RING_CONFIG              0x88
-+#define   EIP93_PE_EN_EXT_TRIG                        BIT(31)
-+/* Absent in later revision of eip93 */
-+/* #define   EIP93_PE_RING_OFFSET             GENMASK(23, 15) */
-+#define   EIP93_PE_RING_SIZE                  GENMASK(9, 0)
-+#define EIP93_REG_PE_RING_THRESH              0x8c
-+#define   EIPR93_PE_TIMEROUT_EN                       BIT(31)
-+#define   EIPR93_PE_RD_TIMEOUT                        GENMASK(29, 26)
-+#define   EIPR93_PE_RDR_THRESH                        GENMASK(25, 16)
-+#define   EIPR93_PE_CDR_THRESH                        GENMASK(9, 0)
-+#define EIP93_REG_PE_CD_COUNT                 0x90
-+#define   EIP93_PE_CD_COUNT                   GENMASK(10, 0)
-+/*
-+ * In the same register, writing a value in GENMASK(7, 0) will
-+ * increment the descriptor count and start DMA action.
-+ */
-+#define   EIP93_PE_CD_COUNT_INCR              GENMASK(7, 0)
-+#define EIP93_REG_PE_RD_COUNT                 0x94
-+#define   EIP93_PE_RD_COUNT                   GENMASK(10, 0)
-+/*
-+ * In the same register, writing a value in GENMASK(7, 0) will
-+ * increment the descriptor count and start DMA action.
-+ */
-+#define   EIP93_PE_RD_COUNT_INCR              GENMASK(7, 0)
-+#define EIP93_REG_PE_RING_RW_PNTR             0x98 /* RING_PNTR */
-+
-+/* PACKET ENGINE  configuration registers */
-+#define EIP93_REG_PE_CONFIG                   0x100
-+#define   EIP93_PE_CONFIG_SWAP_TARGET         BIT(20)
-+#define   EIP93_PE_CONFIG_SWAP_DATA           BIT(18)
-+#define   EIP93_PE_CONFIG_SWAP_SA             BIT(17)
-+#define   EIP93_PE_CONFIG_SWAP_CDRD           BIT(16)
-+#define   EIP93_PE_CONFIG_EN_CDR_UPDATE               BIT(10)
-+#define   EIP93_PE_CONFIG_PE_MODE             GENMASK(9, 8)
-+#define   EIP93_PE_TARGET_AUTO_RING_MODE      FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3)
-+#define   EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2)
-+#define   EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE       FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1)
-+#define   EIP93_PE_DIRECT_HOST_MODE           FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0)
-+#define   EIP93_PE_CONFIG_RST_RING            BIT(2)
-+#define   EIP93_PE_CONFIG_RST_PE              BIT(0)
-+#define EIP93_REG_PE_STATUS                   0x104
-+#define EIP93_REG_PE_BUF_THRESH                       0x10c
-+#define   EIP93_PE_OUTBUF_THRESH              GENMASK(23, 16)
-+#define   EIP93_PE_INBUF_THRESH                       GENMASK(7, 0)
-+#define EIP93_REG_PE_INBUF_COUNT              0x100
-+#define EIP93_REG_PE_OUTBUF_COUNT             0x114
-+#define EIP93_REG_PE_BUF_RW_PNTR              0x118 /* BUF_PNTR */
-+
-+/* PACKET ENGINE endian config */
-+#define EIP93_REG_PE_ENDIAN_CONFIG            0x1cc
-+#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG     0x1d0
-+#define   EIP93_PE_ENDIAN_TARGET_BYTE_SWAP    GENMASK(23, 16)
-+#define   EIP93_PE_ENDIAN_MASTER_BYTE_SWAP    GENMASK(7, 0)
-+/*
-+ * Bytes are referenced two bits at a time by ID:
-+ * GENMASK(7, 0) is split into four parts, one for each byte.
-+ * Example LITTLE ENDIAN:  Example BIG ENDIAN:
-+ * GENMASK(7, 6) 0x3       GENMASK(7, 6) 0x0
-+ * GENMASK(5, 4) 0x2       GENMASK(5, 4) 0x1
-+ * GENMASK(3, 2) 0x1       GENMASK(3, 2) 0x2
-+ * GENMASK(1, 0) 0x0       GENMASK(1, 0) 0x3
-+ */
-+#define   EIP93_PE_ENDIAN_BYTE0                       0x0
-+#define   EIP93_PE_ENDIAN_BYTE1                       0x1
-+#define   EIP93_PE_ENDIAN_BYTE2                       0x2
-+#define   EIP93_PE_ENDIAN_BYTE3                       0x3
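As a quick check of the table above, here is a standalone sketch that packs the four EIP93_PE_ENDIAN_BYTE* IDs into one 8-bit byte-swap field; the 0xe4/0x1b results are derived from the comment's layout, not taken from a datasheet.

/* Sketch only: byte-swap field packing per the comment above. */
#include <stdio.h>

/* pack four 2-bit byte IDs into one 8-bit swap field (ID for bits 7:6 first) */
static unsigned int swap_field(unsigned int b3, unsigned int b2,
			       unsigned int b1, unsigned int b0)
{
	return (b3 << 6) | (b2 << 4) | (b1 << 2) | b0;
}

int main(void)
{
	printf("little endian: 0x%02x\n", swap_field(3, 2, 1, 0));	/* 0xe4 */
	printf("big endian:    0x%02x\n", swap_field(0, 1, 2, 3));	/* 0x1b */
	return 0;
}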
-+
-+/* EIP93 CLOCK control registers */
-+#define EIP93_REG_PE_CLOCK_CTRL                       0x1e8
-+#define   EIP93_PE_CLOCK_EN_HASH_CLK          BIT(4)
-+#define   EIP93_PE_CLOCK_EN_ARC4_CLK          BIT(3)
-+#define   EIP93_PE_CLOCK_EN_AES_CLK           BIT(2)
-+#define   EIP93_PE_CLOCK_EN_DES_CLK           BIT(1)
-+#define   EIP93_PE_CLOCK_EN_PE_CLK            BIT(0)
-+
-+/* EIP93 Device Option and Revision Register */
-+#define EIP93_REG_PE_OPTION_1                 0x1f4
-+#define   EIP93_PE_OPTION_MAC_KEY256          BIT(31)
-+#define   EIP93_PE_OPTION_MAC_KEY192          BIT(30)
-+#define   EIP93_PE_OPTION_MAC_KEY128          BIT(29)
-+#define   EIP93_PE_OPTION_AES_CBC_MAC         BIT(28)
-+#define   EIP93_PE_OPTION_AES_XCBX            BIT(23)
-+#define   EIP93_PE_OPTION_SHA_256             BIT(19)
-+#define   EIP93_PE_OPTION_SHA_224             BIT(18)
-+#define   EIP93_PE_OPTION_SHA_1                       BIT(17)
-+#define   EIP93_PE_OPTION_MD5                 BIT(16)
-+#define   EIP93_PE_OPTION_AES_KEY256          BIT(15)
-+#define   EIP93_PE_OPTION_AES_KEY192          BIT(14)
-+#define   EIP93_PE_OPTION_AES_KEY128          BIT(13)
-+#define   EIP93_PE_OPTION_AES                 BIT(2)
-+#define   EIP93_PE_OPTION_ARC4                        BIT(1)
-+#define   EIP93_PE_OPTION_TDES                        BIT(0) /* DES and TDES */
-+#define EIP93_REG_PE_OPTION_0                 0x1f8
-+#define EIP93_REG_PE_REVISION                 0x1fc
-+#define   EIP93_PE_REVISION_MAJ_HW_REV                GENMASK(27, 24)
-+#define   EIP93_PE_REVISION_MIN_HW_REV                GENMASK(23, 20)
-+#define   EIP93_PE_REVISION_HW_PATCH          GENMASK(19, 16)
-+#define   EIP93_PE_REVISION_EIP_NO            GENMASK(7, 0)
-+
-+/* EIP93 Interrupt Control Register */
-+#define EIP93_REG_INT_UNMASK_STAT             0x200
-+#define EIP93_REG_INT_MASK_STAT                       0x204
-+#define EIP93_REG_INT_CLR                     0x204
-+#define EIP93_REG_INT_MASK                    0x208 /* INT_EN */
-+/* Each int reg has the same bitmap */
-+#define   EIP93_INT_INTERFACE_ERR             BIT(18)
-+#define   EIP93_INT_RPOC_ERR                  BIT(17)
-+#define   EIP93_INT_PE_RING_ERR                       BIT(16)
-+#define   EIP93_INT_HALT                      BIT(15)
-+#define   EIP93_INT_OUTBUF_THRESH             BIT(11)
-+#define   EIP93_INT_INBUF_THRESH              BIT(10)
-+#define   EIP93_INT_OPERATION_DONE            BIT(9)
-+#define   EIP93_INT_RDR_THRESH                        BIT(1)
-+#define   EIP93_INT_CDR_THRESH                        BIT(0)
-+#define   EIP93_INT_ALL                               (EIP93_INT_INTERFACE_ERR | \
-+                                               EIP93_INT_RPOC_ERR | \
-+                                               EIP93_INT_PE_RING_ERR | \
-+                                               EIP93_INT_HALT | \
-+                                               EIP93_INT_OUTBUF_THRESH | \
-+                                               EIP93_INT_INBUF_THRESH | \
-+                                               EIP93_INT_OPERATION_DONE | \
-+                                               EIP93_INT_RDR_THRESH | \
-+                                               EIP93_INT_CDR_THRESH)
-+
-+#define EIP93_REG_INT_CFG                     0x20c
-+#define   EIP93_INT_TYPE_PULSE                        BIT(0)
-+#define EIP93_REG_MASK_ENABLE                 0x210
-+#define EIP93_REG_MASK_DISABLE                        0x214
-+
-+/* EIP93 SA Record register */
-+#define EIP93_REG_SA_CMD_0                    0x400
-+#define   EIP93_SA_CMD_SAVE_HASH              BIT(29)
-+#define   EIP93_SA_CMD_SAVE_IV                        BIT(28)
-+#define   EIP93_SA_CMD_HASH_SOURCE            GENMASK(27, 26)
-+#define   EIP93_SA_CMD_HASH_NO_LOAD           FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
-+#define   EIP93_SA_CMD_HASH_FROM_STATE                FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
-+#define   EIP93_SA_CMD_HASH_FROM_SA           FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
-+#define   EIP93_SA_CMD_IV_SOURCE              GENMASK(25, 24)
-+#define   EIP93_SA_CMD_IV_FROM_PRNG           FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
-+#define   EIP93_SA_CMD_IV_FROM_STATE          FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
-+#define   EIP93_SA_CMD_IV_FROM_INPUT          FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
-+#define   EIP93_SA_CMD_IV_NO_LOAD             FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
-+#define   EIP93_SA_CMD_DIGEST_LENGTH          GENMASK(23, 20)
-+#define   EIP93_SA_CMD_DIGEST_10WORD          FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0xa) /* SRTP and TLS */
-+#define   EIP93_SA_CMD_DIGEST_8WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
-+#define   EIP93_SA_CMD_DIGEST_7WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
-+#define   EIP93_SA_CMD_DIGEST_6WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
-+#define   EIP93_SA_CMD_DIGEST_5WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
-+#define   EIP93_SA_CMD_DIGEST_4WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
-+#define   EIP93_SA_CMD_DIGEST_3WORD_IPSEC     FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
-+#define   EIP93_SA_CMD_DIGEST_2WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
-+#define   EIP93_SA_CMD_DIGEST_1WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
-+#define   EIP93_SA_CMD_DIGEST_3WORD           FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
-+#define   EIP93_SA_CMD_HDR_PROC                       BIT(19)
-+#define   EIP93_SA_CMD_EXT_PAD                        BIT(18)
-+#define   EIP93_SA_CMD_SCPAD                  BIT(17)
-+#define   EIP93_SA_CMD_HASH                   GENMASK(15, 12)
-+#define   EIP93_SA_CMD_HASH_NULL              FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
-+#define   EIP93_SA_CMD_HASH_SHA256            FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
-+#define   EIP93_SA_CMD_HASH_SHA224            FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
-+#define   EIP93_SA_CMD_HASH_SHA1              FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
-+#define   EIP93_SA_CMD_HASH_MD5                       FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
-+#define   EIP93_SA_CMD_CIPHER                 GENMASK(11, 8)
-+#define   EIP93_SA_CMD_CIPHER_NULL            FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
-+#define   EIP93_SA_CMD_CIPHER_AES             FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
-+#define   EIP93_SA_CMD_CIPHER_ARC4            FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
-+#define   EIP93_SA_CMD_CIPHER_3DES            FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
-+#define   EIP93_SA_CMD_CIPHER_DES             FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
-+#define   EIP93_SA_CMD_PAD_TYPE                       GENMASK(7, 6)
-+#define   EIP93_SA_CMD_PAD_CONST_SSL          FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
-+#define   EIP93_SA_CMD_PAD_TLS_DTLS           FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
-+#define   EIP93_SA_CMD_PAD_ZERO                       FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
-+#define   EIP93_SA_CMD_PAD_CONST              FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
-+#define   EIP93_SA_CMD_PAD_PKCS7              FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
-+#define   EIP93_SA_CMD_PAD_IPSEC              FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
-+#define   EIP93_SA_CMD_OPGROUP                        GENMASK(5, 4)
-+#define   EIP93_SA_CMD_OP_EXT                 FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
-+#define   EIP93_SA_CMD_OP_PROTOCOL            FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
-+#define   EIP93_SA_CMD_OP_BASIC                       FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
-+#define   EIP93_SA_CMD_DIRECTION_IN           BIT(3) /* 0: outbound 1: inbound */
-+#define   EIP93_SA_CMD_OPCODE                 GENMASK(2, 0)
-+#define   EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG  0x7
-+#define   EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH  0x3
-+#define   EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1
-+#define   EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC   0x0
-+#define   EIP93_SA_CMD_OPCODE_BASIC_IN_HASH   0x3
-+#define   EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC       0x1
-+#define   EIP93_SA_CMD_OPCODE_BASIC_IN_DEC    0x0
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP        0x0
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL        0x4
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS        0x5
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP       0x7
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3
-+#define   EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP        0x7
-+#define   EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL    0x1
-+#define   EIP93_SA_CMD_OPCODE_EXT_OUT_SSL     0x4
-+#define   EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10  0x5
-+#define   EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11  0x6
-+#define   EIP93_SA_CMD_OPCODE_EXT_IN_DTSL     0x1
-+#define   EIP93_SA_CMD_OPCODE_EXT_IN_SSL      0x4
-+#define   EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10   0x5
-+#define   EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11   0x6
-+#define EIP93_REG_SA_CMD_1                    0x404
-+#define   EIP93_SA_CMD_EN_SEQNUM_CHK          BIT(29)
-+/* This mask can be used for either ARC4 or AES */
-+#define   EIP93_SA_CMD_ARC4_KEY_LENGHT                GENMASK(28, 24)
-+#define   EIP93_SA_CMD_AES_DEC_KEY            BIT(28) /* 0: encrypt key 1: decrypt key */
-+#define   EIP93_SA_CMD_AES_KEY_LENGTH         GENMASK(26, 24)
-+#define   EIP93_SA_CMD_AES_KEY_256BIT         FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4)
-+#define   EIP93_SA_CMD_AES_KEY_192BIT         FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3)
-+#define   EIP93_SA_CMD_AES_KEY_128BIT         FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2)
-+#define   EIP93_SA_CMD_HASH_CRYPT_OFFSET      GENMASK(23, 16)
-+#define   EIP93_SA_CMD_BYTE_OFFSET            BIT(13) /* 0: CRYPT_OFFSET in 32bit word 1: CRYPT_OFFSET in 8bit bytes */
-+#define   EIP93_SA_CMD_HMAC                   BIT(12)
-+#define   EIP93_SA_CMD_SSL_MAC                        BIT(12)
-+/* This mask can be used for either ARC4 or AES */
-+#define   EIP93_SA_CMD_CHIPER_MODE            GENMASK(9, 8)
-+/* AES or DES operations */
-+#define   EIP93_SA_CMD_CHIPER_MODE_ICM                FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3)
-+#define   EIP93_SA_CMD_CHIPER_MODE_CTR                FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2)
-+#define   EIP93_SA_CMD_CHIPER_MODE_CBC                FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
-+#define   EIP93_SA_CMD_CHIPER_MODE_ECB                FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
-+/* ARC4 operations */
-+#define   EIP93_SA_CMD_CHIPER_MODE_STATEFULL  FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
-+#define   EIP93_SA_CMD_CHIPER_MODE_STATELESS  FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
-+#define   EIP93_SA_CMD_COPY_PAD                       BIT(3)
-+#define   EIP93_SA_CMD_COPY_PAYLOAD           BIT(2)
-+#define   EIP93_SA_CMD_COPY_HEADER            BIT(1)
-+#define   EIP93_SA_CMD_COPY_DIGEST            BIT(0) /* With this enabled, COPY_PAD is required */
-+
-+/* State save register */
-+#define EIP93_REG_STATE_IV_0                  0x500
-+#define EIP93_REG_STATE_IV_1                  0x504
-+
-+#define EIP93_REG_PE_ARC4STATE                        0x700
-+
-+struct sa_record {
-+      u32 sa_cmd0_word;
-+      u32 sa_cmd1_word;
-+      u32 sa_key[8];
-+      u8 sa_i_digest[32];
-+      u8 sa_o_digest[32];
-+      u32 sa_spi;
-+      u32 sa_seqnum[2];
-+      u32 sa_seqmum_mask[2];
-+      u32 sa_nonce;
-+} __packed;
-+
-+struct sa_state {
-+      u32 state_iv[4];
-+      u32 state_byte_cnt[2];
-+      u8 state_i_digest[32];
-+} __packed;
-+
-+struct eip93_descriptor {
-+      u32 pe_ctrl_stat_word;
-+      u32 src_addr;
-+      u32 dst_addr;
-+      u32 sa_addr;
-+      u32 state_addr;
-+      u32 arc4_addr;
-+      u32 user_id;
-+      u32 pe_length_word;
-+} __packed;
-+
-+#endif
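To make the user_id scheme documented above concrete, here is a standalone sketch that packs and unpacks the two fields (descriptor flags in bits 31:16, IDR index in bits 15:0). The helper only mimics FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, and the example flag value (EIP93_DESC_LAST | EIP93_DESC_SKCIPHER from eip93-main.h) and IDR slot are assumptions for illustration.

/* Sketch only: EIP93_PE_USER_ID_* field packing. */
#include <stdio.h>

#define USER_ID_DESC_FLAGS_SHIFT 16
#define USER_ID_CRYPTO_IDR_MASK  0xffffu

/* pack/unpack mimicking FIELD_PREP()/FIELD_GET() on EIP93_PE_USER_ID_* */
static unsigned int user_id_pack(unsigned int desc_flags, unsigned int idr)
{
	return (desc_flags << USER_ID_DESC_FLAGS_SHIFT) |
	       (idr & USER_ID_CRYPTO_IDR_MASK);
}

int main(void)
{
	/* assumed example: EIP93_DESC_LAST | EIP93_DESC_SKCIPHER, IDR slot 42 */
	unsigned int user_id = user_id_pack(0x0108, 42);

	printf("user_id=0x%08x flags=0x%04x idr=%u\n",
	       user_id,
	       user_id >> USER_ID_DESC_FLAGS_SHIFT,
	       user_id & USER_ID_CRYPTO_IDR_MASK);
	return 0;
}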
diff --git a/target/linux/airoha/patches-6.6/300-spi-Add-support-for-the-Airoha-EN7523-SoC-SPI-contro.patch b/target/linux/airoha/patches-6.6/300-spi-Add-support-for-the-Airoha-EN7523-SoC-SPI-contro.patch
deleted file mode 100644
index d31f7fd..0000000
+++ /dev/null
@@ -1,341 +0,0 @@
---- a/drivers/spi/Kconfig
-+++ b/drivers/spi/Kconfig
-@@ -363,6 +363,12 @@ config SPI_DLN2
-        This driver can also be built as a module.  If so, the module
-        will be called spi-dln2.
-+config SPI_AIROHA_EN7523
-+      bool "Airoha EN7523 SPI controller support"
-+      depends on ARCH_AIROHA
-+      help
-+        This enables SPI controller support for the Airoha EN7523 SoC.
-+
- config SPI_EP93XX
-       tristate "Cirrus Logic EP93xx SPI controller"
-       depends on ARCH_EP93XX || COMPILE_TEST
---- a/drivers/spi/Makefile
-+++ b/drivers/spi/Makefile
-@@ -51,6 +51,7 @@ obj-$(CONFIG_SPI_DW_BT1)             += spi-dw-bt1.
- obj-$(CONFIG_SPI_DW_MMIO)             += spi-dw-mmio.o
- obj-$(CONFIG_SPI_DW_PCI)              += spi-dw-pci.o
- obj-$(CONFIG_SPI_EP93XX)              += spi-ep93xx.o
-+obj-$(CONFIG_SPI_AIROHA_EN7523)               += spi-en7523.o
- obj-$(CONFIG_SPI_FALCON)              += spi-falcon.o
- obj-$(CONFIG_SPI_FSI)                 += spi-fsi.o
- obj-$(CONFIG_SPI_FSL_CPM)             += spi-fsl-cpm.o
---- /dev/null
-+++ b/drivers/spi/spi-en7523.c
-@@ -0,0 +1,313 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/spi/spi.h>
-+
-+
-+#define ENSPI_READ_IDLE_EN                    0x0004
-+#define ENSPI_MTX_MODE_TOG                    0x0014
-+#define ENSPI_RDCTL_FSM                               0x0018
-+#define ENSPI_MANUAL_EN                               0x0020
-+#define ENSPI_MANUAL_OPFIFO_EMPTY             0x0024
-+#define ENSPI_MANUAL_OPFIFO_WDATA             0x0028
-+#define ENSPI_MANUAL_OPFIFO_FULL              0x002C
-+#define ENSPI_MANUAL_OPFIFO_WR                        0x0030
-+#define ENSPI_MANUAL_DFIFO_FULL                       0x0034
-+#define ENSPI_MANUAL_DFIFO_WDATA              0x0038
-+#define ENSPI_MANUAL_DFIFO_EMPTY              0x003C
-+#define ENSPI_MANUAL_DFIFO_RD                 0x0040
-+#define ENSPI_MANUAL_DFIFO_RDATA              0x0044
-+#define ENSPI_IER                             0x0090
-+#define ENSPI_NFI2SPI_EN                      0x0130
-+
-+// TODO not in spi block
-+#define ENSPI_CLOCK_DIVIDER                   ((void __iomem *)0x1fa201c4)
-+
-+#define       OP_CSH                                  0x00
-+#define       OP_CSL                                  0x01
-+#define       OP_CK                                   0x02
-+#define       OP_OUTS                                 0x08
-+#define       OP_OUTD                                 0x09
-+#define       OP_OUTQ                                 0x0A
-+#define       OP_INS                                  0x0C
-+#define       OP_INS0                                 0x0D
-+#define       OP_IND                                  0x0E
-+#define       OP_INQ                                  0x0F
-+#define       OP_OS2IS                                0x10
-+#define       OP_OS2ID                                0x11
-+#define       OP_OS2IQ                                0x12
-+#define       OP_OD2IS                                0x13
-+#define       OP_OD2ID                                0x14
-+#define       OP_OD2IQ                                0x15
-+#define       OP_OQ2IS                                0x16
-+#define       OP_OQ2ID                                0x17
-+#define       OP_OQ2IQ                                0x18
-+#define       OP_OSNIS                                0x19
-+#define       OP_ODNID                                0x1A
-+
-+#define MATRIX_MODE_AUTO              1
-+#define   CONF_MTX_MODE_AUTO          0
-+#define   MANUALEN_AUTO                       0
-+#define MATRIX_MODE_MANUAL            0
-+#define   CONF_MTX_MODE_MANUAL                9
-+#define   MANUALEN_MANUAL             1
-+
-+#define _ENSPI_MAX_XFER                       0x1ff
-+
-+#define REG(x)                        (iobase + x)
-+
-+
-+static void __iomem *iobase;
-+
-+
-+static void opfifo_write(u32 cmd, u32 len)
-+{
-+      u32 tmp = ((cmd & 0x1f) << 9) | (len & 0x1ff);
-+
-+      writel(tmp, REG(ENSPI_MANUAL_OPFIFO_WDATA));
-+
-+      /* Wait for room in OPFIFO */
-+      while (readl(REG(ENSPI_MANUAL_OPFIFO_FULL)))
-+              ;
-+
-+      /* Shift command into OPFIFO */
-+      writel(1, REG(ENSPI_MANUAL_OPFIFO_WR));
-+
-+      /* Wait for command to finish */
-+      while (!readl(REG(ENSPI_MANUAL_OPFIFO_EMPTY)))
-+              ;
-+}
-+
-+static void set_cs(int state)
-+{
-+      if (state)
-+              opfifo_write(OP_CSH, 1);
-+      else
-+              opfifo_write(OP_CSL, 1);
-+}
-+
-+static void manual_begin_cmd(void)
-+{
-+      /* Disable read idle state */
-+      writel(0, REG(ENSPI_READ_IDLE_EN));
-+
-+      /* Wait for FSM to reach idle state */
-+      while (readl(REG(ENSPI_RDCTL_FSM)))
-+              ;
-+
-+      /* Set SPI core to manual mode */
-+      writel(CONF_MTX_MODE_MANUAL, REG(ENSPI_MTX_MODE_TOG));
-+      writel(MANUALEN_MANUAL, REG(ENSPI_MANUAL_EN));
-+}
-+
-+static void manual_end_cmd(void)
-+{
-+      /* Set SPI core to auto mode */
-+      writel(CONF_MTX_MODE_AUTO, REG(ENSPI_MTX_MODE_TOG));
-+      writel(MANUALEN_AUTO, REG(ENSPI_MANUAL_EN));
-+
-+      /* Enable read idle state */
-+      writel(1, REG(ENSPI_READ_IDLE_EN));
-+}
-+
-+static void dfifo_read(u8 *buf, int len)
-+{
-+      int i;
-+
-+      for (i = 0; i < len; i++) {
-+              /* Wait for requested data to show up in DFIFO */
-+              while (readl(REG(ENSPI_MANUAL_DFIFO_EMPTY)))
-+                      ;
-+              buf[i] = readl(REG(ENSPI_MANUAL_DFIFO_RDATA));
-+              /* Queue up next byte */
-+              writel(1, REG(ENSPI_MANUAL_DFIFO_RD));
-+      }
-+}
-+
-+static void dfifo_write(const u8 *buf, int len)
-+{
-+      int i;
-+
-+      for (i = 0; i < len; i++) {
-+              /* Wait for room in DFIFO */
-+              while (readl(REG(ENSPI_MANUAL_DFIFO_FULL)))
-+                      ;
-+              writel(buf[i], REG(ENSPI_MANUAL_DFIFO_WDATA));
-+      }
-+}
-+
-+#if 0
-+static void set_spi_clock_speed(int freq_mhz)
-+{
-+      u32 tmp, val;
-+
-+      tmp = readl(ENSPI_CLOCK_DIVIDER);
-+      tmp &= 0xffff0000;
-+      writel(tmp, ENSPI_CLOCK_DIVIDER);
-+
-+      val = (400 / (freq_mhz * 2));
-+      tmp |= (val << 8) | 1;
-+      writel(tmp, ENSPI_CLOCK_DIVIDER);
-+}
-+#endif
-+
-+static void init_hw(void)
-+{
-+      /* Disable manual/auto mode clash interrupt */
-+      writel(0, REG(ENSPI_IER));
-+
-+      // TODO via clk framework
-+      // set_spi_clock_speed(50);
-+
-+      /* Disable DMA */
-+      writel(0, REG(ENSPI_NFI2SPI_EN));
-+}
-+
-+static int xfer_read(struct spi_transfer *xfer)
-+{
-+      int opcode;
-+      uint8_t *buf = xfer->rx_buf;
-+
-+      switch (xfer->rx_nbits) {
-+      case SPI_NBITS_SINGLE:
-+              opcode = OP_INS;
-+              break;
-+      case SPI_NBITS_DUAL:
-+              opcode = OP_IND;
-+              break;
-+      case SPI_NBITS_QUAD:
-+              opcode = OP_INQ;
-+              break;
-+      }
-+
-+      opfifo_write(opcode, xfer->len);
-+      dfifo_read(buf, xfer->len);
-+
-+      return xfer->len;
-+}
-+
-+static int xfer_write(struct spi_transfer *xfer, int next_xfer_is_rx)
-+{
-+      int opcode;
-+      const uint8_t *buf = xfer->tx_buf;
-+
-+      if (next_xfer_is_rx) {
-+              /* need to use Ox2Ix opcode to set the core to input afterwards */
-+              switch (xfer->tx_nbits) {
-+              case SPI_NBITS_SINGLE:
-+                      opcode = OP_OS2IS;
-+                      break;
-+              case SPI_NBITS_DUAL:
-+                      opcode = OP_OS2ID;
-+                      break;
-+              case SPI_NBITS_QUAD:
-+                      opcode = OP_OS2IQ;
-+                      break;
-+              }
-+      } else {
-+              switch (xfer->tx_nbits) {
-+              case SPI_NBITS_SINGLE:
-+                      opcode = OP_OUTS;
-+                      break;
-+              case SPI_NBITS_DUAL:
-+                      opcode = OP_OUTD;
-+                      break;
-+              case SPI_NBITS_QUAD:
-+                      opcode = OP_OUTQ;
-+                      break;
-+              }
-+      }
-+
-+      opfifo_write(opcode, xfer->len);
-+      dfifo_write(buf, xfer->len);
-+
-+      return xfer->len;
-+}
-+
-+size_t max_transfer_size(struct spi_device *spi)
-+{
-+      return _ENSPI_MAX_XFER;
-+}
-+
-+int transfer_one_message(struct spi_controller *ctrl, struct spi_message *msg)
-+{
-+      struct spi_transfer *xfer;
-+      int next_xfer_is_rx = 0;
-+
-+      manual_begin_cmd();
-+      set_cs(0);
-+      list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-+              if (xfer->tx_buf) {
-+                      if (!list_is_last(&xfer->transfer_list, &msg->transfers)
-+                          && list_next_entry(xfer, transfer_list)->rx_buf != NULL)
-+                              next_xfer_is_rx = 1;
-+                      else
-+                              next_xfer_is_rx = 0;
-+                      msg->actual_length += xfer_write(xfer, next_xfer_is_rx);
-+              } else if (xfer->rx_buf) {
-+                      msg->actual_length += xfer_read(xfer);
-+              }
-+      }
-+      set_cs(1);
-+      manual_end_cmd();
-+
-+      msg->status = 0;
-+      spi_finalize_current_message(ctrl);
-+
-+      return 0;
-+}
-+
-+static int spi_probe(struct platform_device *pdev)
-+{
-+      struct spi_controller *ctrl;
-+      int err;
-+
-+      ctrl = devm_spi_alloc_master(&pdev->dev, 0);
-+      if (!ctrl) {
-+              dev_err(&pdev->dev, "Error allocating SPI controller\n");
-+              return -ENOMEM;
-+      }
-+
-+      iobase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
-+      if (IS_ERR(iobase)) {
-+              dev_err(&pdev->dev, "Could not map SPI register address");
-+              return -ENOMEM;
-+      }
-+
-+      init_hw();
-+
-+      ctrl->dev.of_node = pdev->dev.of_node;
-+      ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
-+      ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL;
-+      ctrl->max_transfer_size = max_transfer_size;
-+      ctrl->transfer_one_message = transfer_one_message;
-+      err = devm_spi_register_controller(&pdev->dev, ctrl);
-+      if (err) {
-+              dev_err(&pdev->dev, "Could not register SPI controller\n");
-+              return -ENODEV;
-+      }
-+
-+      return 0;
-+}
-+
-+static const struct of_device_id spi_of_ids[] = {
-+      { .compatible = "airoha,en7523-spi" },
-+      { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, spi_of_ids);
-+
-+static struct platform_driver spi_driver = {
-+      .probe = spi_probe,
-+      .driver = {
-+              .name = "airoha-en7523-spi",
-+              .of_match_table = spi_of_ids,
-+      },
-+};
-+
-+module_platform_driver(spi_driver);
-+
-+MODULE_LICENSE("GPL v2");
-+MODULE_AUTHOR("Bert Vermeulen <bert@biot.com>");
-+MODULE_DESCRIPTION("Airoha EN7523 SPI driver");
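In the deleted EN7523 SPI driver every manual-mode operation goes through opfifo_write(), which packs a 5-bit opcode and a 9-bit length into one FIFO word; that 9-bit length is why max_transfer_size() is capped at _ENSPI_MAX_XFER (0x1ff), and why a write followed by a read has to use the Ox2Ix opcodes to turn the bus around. Below is a standalone sketch of the packing, with OP_OUTS and a 4-byte length as assumed example inputs.

/* Sketch only: OPFIFO word layout as used by opfifo_write() above. */
#include <stdio.h>

#define OP_OUTS        0x08	/* single-line output opcode, as in the driver */
#define ENSPI_MAX_XFER 0x1ff	/* 9-bit length field */

/* same packing as opfifo_write(): cmd -> bits 13:9, len -> bits 8:0 */
static unsigned int opfifo_word(unsigned int cmd, unsigned int len)
{
	return ((cmd & 0x1f) << 9) | (len & ENSPI_MAX_XFER);
}

int main(void)
{
	/* assumed example: write 4 bytes on a single line */
	printf("OPFIFO word = 0x%04x\n", opfifo_word(OP_OUTS, 4));
	return 0;
}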
diff --git a/target/linux/airoha/patches-6.6/900-airoha-bmt-support.patch b/target/linux/airoha/patches-6.6/900-airoha-bmt-support.patch
deleted file mode 100644
index 5ba31c0..0000000
+++ /dev/null
@@ -1,578 +0,0 @@
---- /dev/null
-+++ b/drivers/mtd/nand/airoha_bmt.c
-@@ -0,0 +1,575 @@
-+
-+/*
-+ * Airoha BMT algorithm
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/slab.h>
-+#include <linux/types.h> 
-+#include <linux/module.h> 
-+#include <linux/moduleparam.h>  
-+#include "mtk_bmt.h"
-+
-+#define MAX_BMT_SIZE          (250) 
-+#define MAX_RAW_BAD_BLOCK_SIZE  (250)
-+#define POOL_GOOD_BLOCK_PERCENT 8/100
-+#define MAX_BMT_PERCENT         1/8
-+
-+typedef struct {
-+    char signature[3];
-+    u8 version;
-+    u8 bad_count;  // this field is useless 
-+    u8 size; 
-+    u8 checksum;
-+    u8 reserved[13];
-+} bmt_table_header;
-+
-+typedef struct {
-+    u16 from;      
-+    u16 to; 
-+} bmt_entry;
-+
-+typedef struct {
-+    bmt_table_header header;
-+    bmt_entry table[MAX_BMT_SIZE];
-+} bmt_table;
-+
-+typedef struct {
-+    char signature[4];
-+    u32 checksum;
-+    u8 version;
-+    u8 size;
-+    u8 reserved[2]; 
-+} bbt_table_header;
-+
-+typedef struct {
-+    bbt_table_header header;
-+    u16            table[MAX_RAW_BAD_BLOCK_SIZE];
-+} bbt_table;
-+
-+bbt_table bbt; 
-+bmt_table bmt;
-+ 
-+int bmt_index=0xffff; 
-+int bbt_index=0xffff; 
-+unsigned int total_blks  , system_blks , bmt_blks, _to, _to2, val;
-+
-+module_param(bmt_index,    int,  S_IRUSR  |  S_IWUSR);
-+module_param(bbt_index,    int,  S_IRUSR  |  S_IWUSR);
-+module_param(total_blks,   int,  S_IRUSR  |  S_IWUSR);
-+module_param(system_blks,  int,  S_IRUSR  |  S_IWUSR);
-+module_param(bmt_blks,     int,  S_IRUSR  |  S_IWUSR);
-+module_param(_to,          int,  S_IRUSR  |  S_IWUSR);
-+module_param(_to2,         int,  S_IRUSR  |  S_IWUSR);
-+module_param(val,          int,  S_IRUSR  |  S_IWUSR);
-+
-+
-+static bool is_bad_raw(int block) { 
-+      u8 fdm[4];
-+      int ret;
-+      ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
-+                          fdm, sizeof(fdm));
-+      if (ret || fdm[0] != 0xff ){
-+              return true; 
-+      }
-+      return false;
-+}
-+
-+static bool is_bad( int block) { 
-+      u8 fdm[4];
-+      int ret;
-+      ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
-+                          fdm, sizeof(fdm));
-+      //printk("%x %x %x %x\n", fdm[0],  fdm[1],  fdm[2],  fdm[3]);
-+      if (ret || fdm[0] != 0xff || fdm[1] != 0xff ){
-+              return true; 
-+      }
-+      return false;
-+}
-+
-+
-+static bool is_mapped( int block) {
-+      u16 mapped_block;
-+      u8 fdm[4];
-+      int ret;
-+
-+      ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
-+                          fdm, sizeof(fdm));
-+      mapped_block = (fdm[2] << 8) |  fdm[3];
-+      //printk("%u is mapped to %d\n", mapped_block);
-+      if (mapped_block == 0xffff) 
-+              return false;
-+      else return true;
-+}
-+
-+static void mark_bad(int block) { 
-+      u8 fdm[4] = {0xff, 0xff, 0xff, 0xff};
-+      struct mtd_oob_ops ops = { 
-+              .mode = MTD_OPS_PLACE_OOB,
-+              .ooboffs = 0,
-+              .ooblen = 4, 
-+              .oobbuf = fdm,
-+              .datbuf = NULL,
-+              .len = 0,
-+      };
-+      int retlen;
-+
-+      printk("marking bad :%d\n", block);
-+      if (block < system_blks) 
-+              fdm[0] = 0x00;
-+      else fdm[1] = 0x00;
-+
-+      retlen = bmtd._write_oob(bmtd.mtd, block << bmtd.blk_shift , &ops) ;
-+      if (retlen < 0) {
-+              printk("marking bad block failed \n");
-+      }
-+}
-+
-+
-+static void mark_good(int block) { 
-+      u8 fdm[4] = {0xff, 0xff, 0xff, 0xff};
-+      struct mtd_oob_ops ops = { 
-+              .mode = MTD_OPS_PLACE_OOB,
-+              .ooboffs = 0,
-+              .ooblen = 4, 
-+              .oobbuf = fdm,
-+              .datbuf = NULL,
-+              .len = 0,
-+      };
-+      int retlen; 
-+      retlen = bmtd._write_oob(bmtd.mtd, block << bmtd.blk_shift , &ops) ;
-+      if (retlen < 0) { 
-+              printk("marking good block failed\n");
-+      }
-+}
-+
-+static void make_mapping(u16 from , u16 to) { 
-+      u8 fdm[4] = {0xff, 0xff, 0xff , 0xff};
-+      struct mtd_oob_ops ops = { 
-+              .mode = MTD_OPS_PLACE_OOB,
-+              .ooboffs = 0,
-+              .ooblen = 4, 
-+              .oobbuf = fdm,
-+              .datbuf = NULL,
-+              .len = 0,
-+      };
-+      int retlen; 
-+
-+      memcpy(fdm + 2, &to, sizeof(to)); // this has to be exactly like this .
-+      retlen = bmtd._write_oob(bmtd.mtd, from << bmtd.blk_shift , &ops) ;
-+      if (retlen < 0) { 
-+              printk("writing block mapping failed\n");
-+      }
-+}
-+
-+static u16 bbt_checksum(void) { 
-+      int i=0;
-+      u16 checksum =0;
-+      u8 *data = (u8*) &bbt; 
-+      checksum += bbt.header.version; 
-+      checksum += bbt.header.size;
-+      data += sizeof(bbt_table_header);
-+      for (; i < sizeof(bbt.table); i++)
-+              checksum += data[i];
-+      return checksum; 
-+}
-+
-+static bool parse_bbt(void) { 
-+      int i = system_blks;
-+      u8 fdm[4];
-+      for (; i < total_blks; i++)  { 
-+              if( !is_bad(i)
-+                 && !bbt_nand_read(blk_pg(i),(unsigned char *)&bbt, sizeof(bbt), fdm, sizeof(fdm))
-+                 && (strncmp(bbt.header.signature , "RAWB", 4)==0)
-+                 && (bbt.header.checksum == bbt_checksum())
-+                ) { 
-+                        bbt_index = i; 
-+                        return true; 
-+                } 
-+      }
-+      return false; 
-+}
-+
-+static u8  bmt_checksum(void) { 
-+      int i; 
-+      u8 checksum = 0;
-+      u8* data = (u8*)&bmt;
-+      checksum += bmt.header.version; 
-+      checksum += bmt.header.size;
-+      data += sizeof(bmt_table_header);
-+      for (i=0;i<bmt_blks*sizeof(bmt_entry);i++)
-+              checksum += data[i];
-+      return checksum;
-+}
-+
-+static bool parse_bmt(void) { 
-+      int i = total_blks-1 ; 
-+      u8 fdm[4];
-+      for (; i> system_blks;i--) { 
-+              if ( !is_bad(i)
-+                       && !bbt_nand_read(blk_pg(i),(unsigned char *)&bmt, sizeof(bmt), fdm, sizeof(fdm))
-+                       && (strncmp(bmt.header.signature , "BMT", 3)==0)
-+                       && (bmt.header.checksum == bmt_checksum())
-+              ) { 
-+                      bmt_index = i ; 
-+                      return true;
-+              }
-+      }
-+      return false; 
-+}
-+
-+static void variable_setup(void) { 
-+      unsigned int need_valid_block_num;
-+      int valid_blks = 0;
-+      int last_blk;
-+
-+      total_blks = bmtd.total_blks;
-+      last_blk = total_blks - 1;
-+      need_valid_block_num = total_blks * POOL_GOOD_BLOCK_PERCENT;
-+
-+      for (; last_blk > 0 ;last_blk--) { 
-+              if (is_bad_raw(last_blk)) { 
-+                      continue; 
-+              } 
-+              valid_blks++; 
-+              if (valid_blks == need_valid_block_num) { 
-+                      break;
-+              }
-+      }
-+      bmt_blks = total_blks - last_blk; 
-+      system_blks = total_blks - bmt_blks;
-+      bmtd.mtd->size = (total_blks -  total_blks * MAX_BMT_PERCENT) * bmtd.mtd->erasesize;
-+}
-+
-+
-+static int find_available_block(bool start_from_end) { 
-+      int i=system_blks,d=1;
-+      int count = 0;
-+      if (start_from_end)
-+              i=total_blks-1,d=-1;
-+      for (; count < (total_blks - system_blks); count++, i+=d) { 
-+              if(bmt_index == i || bbt_index == i || is_bad(i) || is_mapped(i)) 
-+                      continue;
-+              return i ;
-+      }
-+      //TODO: handle OOM
-+      return -1;
-+}
-+
-+static void update_bmt_bbt( void ) {
-+      int retlen  = 0;
-+      struct mtd_oob_ops  ops , ops1;
-+
-+      bbt.header.checksum = bbt_checksum();
-+      bmt.header.checksum = bmt_checksum();
-+      
-+      if(bbt_index ==0xffff) bbt_index = find_available_block(false);
-+      if(bmt_index ==0xffff) bmt_index = find_available_block(true);
-+
-+      bbt_nand_erase(bmt_index);
-+      bbt_nand_erase(bbt_index);
-+      printk("putting back in bbt_index: %d, bmt_index: %d\n" , bbt_index, bmt_index);
-+
-+      ops = (struct mtd_oob_ops) { 
-+              .mode = MTD_OPS_PLACE_OOB,
-+              .ooboffs = 0,
-+              .ooblen = 0,
-+              .oobbuf = NULL,
-+              .len = sizeof(bmt),
-+              .datbuf = (u8 *)&bmt,
-+      };
-+
-+retry_bmt:
-+      retlen  = bmtd._write_oob(bmtd.mtd, bmt_index << bmtd.blk_shift, &ops);
-+      if (retlen) { 
-+              printk("error while write");
-+              mark_bad(bmt_index);
-+              if (bmt_index > system_blks) { 
-+                      bmt_index--; 
-+                      goto retry_bmt;
-+              }
-+              return;
-+      }
-+      ops1 = (struct mtd_oob_ops) { 
-+              .mode = MTD_OPS_PLACE_OOB,
-+              .ooboffs = 0,
-+              .ooblen = 0,
-+              .oobbuf = NULL,
-+              .len = sizeof(bbt),
-+              .datbuf = (u8 *)&bbt,
-+      };
-+
-+retry_bbt:
-+      retlen  = bmtd._write_oob(bmtd.mtd, bbt_index << bmtd.blk_shift, &ops1);
-+      if (retlen) { 
-+              printk("error while write");
-+              mark_bad(bbt_index);
-+              if (bbt_index < total_blks) { 
-+                      bbt_index++;
-+                      goto retry_bbt;
-+              }
-+              return;
-+      }
-+}
-+
-+static bool is_in_bmt(int block) { 
-+      int i;
-+      for (i=0;i<bmt.header.size;i++) 
-+              if (bmt.table[i].from == block)
-+                      return true;
-+      return false; 
-+}
-+
-+static void reconstruct_from_oob(void) { 
-+      int i;
-+
-+      memset(&bmt,0x00,sizeof(bmt));
-+      memcpy(&bmt.header.signature, "BMT",3);
-+      bmt.header.version = 1; 
-+      bmt.header.size = 0;
-+      for ( i = total_blks -1 ; i >= system_blks ;i--) { 
-+              unsigned short mapped_block;
-+              u8 fdm[4];
-+              int ret;
-+
-+              if (is_bad(i)) continue;
-+              ret = bbt_nand_read(blk_pg(i), bmtd.data_buf, bmtd.pg_size,
-+                                  fdm, sizeof(fdm));
-+              if (ret < 0)
-+                      mark_bad(i);
-+
-+              memcpy(&mapped_block,fdm+2,2); // need to be this way
-+              if (mapped_block >= system_blks) continue; 
-+              printk("block %X was mapped to :%X\n", mapped_block, i);
-+              bmt.table[bmt.header.size++] = (bmt_entry){.from = mapped_block , .to = i};
-+      }
-+      memset(&bbt,0x00,sizeof(bbt));
-+      memcpy(&bbt.header.signature , "RAWB", 4);
-+      bbt.header.version  = 1;
-+      bbt.header.size = 0;
-+      for ( i = 0 ; i < system_blks; i++) { 
-+              if (is_bad_raw(i) && !is_in_bmt(i))
-+                      bbt.table[bbt.header.size++] = (u16)i;
-+      }
-+      bmt.header.checksum = bmt_checksum();
-+      bbt.header.checksum = bbt_checksum();
-+      update_bmt_bbt();
-+      printk("bbt and bmt reconstructed successfully\n");
-+}
-+
-+
-+static bool remap_block(u16 block , u16 mapped_block, int copy_len) { 
-+      bool mapped_already_in_bbt = false;
-+      bool mapped_already_in_bmt = false;
-+      bool block_already_in_bbt = false;
-+      u16 new_block = find_available_block(false);
-+      int i; 
-+      // TODO check for -1
-+
-+      bbt_nand_erase(new_block);
-+      if (copy_len)
-+              bbt_nand_copy(new_block , mapped_block , copy_len);
-+
-+      for (i=0; i < bmt.header.size; i++)
-+              if (bmt.table[i].from == block) { 
-+                      bmt.table[i].to = new_block;
-+                      mapped_already_in_bmt = true;
-+                      break;
-+              }
-+
-+      if (!mapped_already_in_bmt)
-+              bmt.table[bmt.header.size++] = (bmt_entry){ .from = block, .to = new_block};
-+
-+      for (i=0;i<bbt.header.size;i++) 
-+              if (bbt.table[i] == mapped_block) { 
-+                      mapped_already_in_bbt = true; 
-+                      break;
-+              } else if (bbt.table[i] == block) { 
-+                      block_already_in_bbt = true;
-+                      break;
-+              }
-+      
-+      if (!mapped_already_in_bbt) 
-+              bbt.table[bbt.header.size++] = mapped_block;
-+      if (mapped_block != block && !block_already_in_bbt) 
-+              bbt.table[bbt.header.size++] = block;
-+
-+      if (mapped_block != block) mark_bad(mapped_block);
-+      mark_bad(block);
-+      make_mapping(new_block, block);
-+
-+      update_bmt_bbt();
-+      return false; 
-+}
-+
-+static int init(struct device_node *np) {
-+      variable_setup();
-+      if (!(parse_bbt() && parse_bmt()))  { 
-+              reconstruct_from_oob();
-+      } else { 
-+              printk("bmt/bbt found\n");
-+      }
-+      return 0;
-+}
-+
-+static  int get_mapping_block( int block) { 
-+      int i;
-+
-+      if (block > system_blks) 
-+              return block;
-+      for (i = 0; i < bmt.header.size; i++)
-+              if (bmt.table[i].from == block) 
-+                      return bmt.table[i].to;
-+      return block;
-+}
-+
-+static void unmap_block( u16 block) {  // not required 
-+      printk("unmapping is called on block : %d\n", block);   
-+}
-+
-+
-+static int  debug( void* data , u64 cmd) { 
-+      int i;
-+      printk("val: %d\n", val);
-+      printk("_to: %d\n", _to);
-+      if (val == 0 ) { 
-+              printk("fixing all\n");
-+              for (i=0;i<total_blks;i++) { 
-+                      mark_good(i);
-+              }
-+      } else if(val ==1 ) { 
-+              int mapped_block;
-+              printk("remapping: %d\n", _to);
-+              mapped_block = get_mapping_block(_to);
-+              printk("before mapped to: %d\n", mapped_block);
-+              remap_block(_to , mapped_block, bmtd.mtd->erasesize);
-+              mapped_block = get_mapping_block(_to);
-+              printk("after mapped to: %d\n", mapped_block);
-+      } else if(val ==2 ) { 
-+              printk("bmt table: \n");
-+              for (i = 0 ; i < bmt.header.size;i++) { 
-+                      printk("%d->%d\n", bmt.table[i].from , bmt.table[i].to);
-+              }
-+              printk("bbt table\n");
-+              for (i =0;i< bbt.header.size;i++) { 
-+                      printk("%d ", bbt.table[i]);
-+              }
-+              printk("\n");
-+      } else if(val == 3) { 
-+              printk("reconstruct from oob\n");
-+              reconstruct_from_oob();
-+      } else if (val == 4) { 
-+              printk("showing the oob status of block %d\n", _to);
-+              printk("%d\n",is_bad(_to));
-+      } else if (val == 5 ) {
-+              printk("trying to parse_bmt again %d\n", parse_bmt());
-+      } else if (val == 6 ) {
-+              printk("marking bad : %d", _to);
-+              mark_bad(_to);
-+      } else if ( val == 7) { 
-+              struct mtd_oob_ops opsk = { 
-+                      .mode = MTD_OPS_PLACE_OOB, 
-+                      .ooboffs = 0, 
-+                      .ooblen = 0, 
-+                      .oobbuf = NULL,
-+                      .len = sizeof(bmt),
-+                      .datbuf = (u8 *)&bmt,
-+              };
-+              int retlen;
-+              printk("parse bmt from the %d block \n", _to);
-+              retlen = bmtd._read_oob(bmtd.mtd, _to << bmtd.blk_shift , &opsk);
-+
-+              printk("status : %d\n", retlen);
-+      } else if (val == 8) {
-+              u8 *data;
-+              int j;
-+              printk("dump bmt hex\n");
-+              data = (u8 *)&bmt;
-+              for (j =0;j < 50;j++) { 
-+                      if(j%20==0) printk("\n");
-+                      printk("%X ", data[j]);
-+              }
-+              printk("bbt table\n");
-+              data = (u8 *)&bbt;
-+              for (j =0;j < 50;j++) { 
-+                      if(j%20==0) printk("\n");
-+                      printk("%X ", data[j]);
-+              }
-+      } else if (val == 9) { 
-+              struct mtd_oob_ops ops = { 
-+                      .mode = MTD_OPS_PLACE_OOB,
-+                      .ooboffs = 0,
-+                      .ooblen = 0,
-+                      .oobbuf = NULL,
-+                      .len = sizeof(bmt),
-+                      .datbuf = (u8 *)&bmt,
-+              };
-+              int retlen;
-+              printk("put bmt at index\n");
-+              retlen  = bmtd._write_oob(bmtd.mtd, _to << bmtd.blk_shift, &ops);
-+              bmt.header.checksum = bmt_checksum();
-+              if (retlen < 0) { 
-+                      printk("error while write");
-+              }
-+      } else if (val == 10) { 
-+              printk("erase block %d\n", _to);
-+              bbt_nand_erase(_to);
-+      } else if (val == 11) {
-+              char *buf1, *buf2;
-+              struct mtd_oob_ops ops = { 
-+                      .mode = MTD_OPS_PLACE_OOB,
-+                      .ooboffs = 0,
-+                      .ooblen = 0,
-+                      .oobbuf = NULL,
-+              };
-+              struct mtd_oob_ops ops1 = { 
-+                      .mode = MTD_OPS_PLACE_OOB,
-+                      .ooboffs = 0,
-+                      .ooblen = 0,
-+                      .oobbuf = NULL,
-+              };
-+              int retlen;
-+              int j;
-+
-+              printk("transferring content from block :%d to %d\n", _to , _to2);
-+              bbt_nand_copy(_to2, _to, bmtd.mtd->erasesize);
-+              printk("now we check size\n");
-+
-+              buf1 = (char*) kzalloc(sizeof(char) * bmtd.mtd->erasesize , GFP_KERNEL);
-+              buf2 = (char*) kzalloc(sizeof(char) * bmtd.mtd->erasesize , GFP_KERNEL);
-+
-+              ops.len = sizeof(char) * bmtd.mtd->erasesize;
-+              ops.datbuf = buf1;
-+              retlen  = bmtd._read_oob(bmtd.mtd, _to << bmtd.blk_shift, &ops);
-+              if (retlen < 0) { 
-+                      printk("error while read\n");
-+              }
-+
-+              ops1.len = sizeof(char) * bmtd.mtd->erasesize;
-+              ops1.datbuf = buf2;
-+              retlen  = bmtd._read_oob(bmtd.mtd, _to2 << bmtd.blk_shift, &ops1);
-+              if (retlen < 0) {
-+                      printk("error while read");
-+              }
-+              for (j = 0 ; j < bmtd.mtd->erasesize ;j++) { 
-+                      if (j%20==0) { 
-+                              printk("\n");
-+                      }
-+                      printk("%X %X ", buf1[j], buf2[j]);
-+              }
-+              printk("\n");
-+
-+      }
-+      return 0;
-+}
-+
-+
-+const struct mtk_bmt_ops airoha_bmt_ops = {
-+      .sig = "bmt",
-+      .sig_len = 3,
-+      .init = init,
-+      .remap_block = remap_block,
-+      .unmap_block = unmap_block,
-+      .get_mapping_block = get_mapping_block,
-+      .debug = debug,
-+};
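The heart of the deleted BMT patch is the from/to table stored in OOB and consulted by get_mapping_block(): any block with an entry is transparently redirected to its spare, everything else maps to itself. A standalone sketch of that lookup over an assumed two-entry table (block numbers invented for the example):

/* Sketch only: the lookup done by get_mapping_block() above. */
#include <stdio.h>

struct bmt_entry { unsigned short from, to; };

/* linear scan over the remap table, identity when unmapped */
static unsigned int map_block(const struct bmt_entry *tbl, int n, unsigned int blk)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].from == blk)
			return tbl[i].to;
	return blk;
}

int main(void)
{
	/* assumed example table: blocks 17 and 250 remapped to spare blocks */
	const struct bmt_entry tbl[] = { { 17, 1020 }, { 250, 1019 } };

	printf("17 -> %u, 42 -> %u\n", map_block(tbl, 2, 17), map_block(tbl, 2, 42));
	return 0;
}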
diff --git a/target/linux/airoha/patches-6.6/901-snand-mtk-bmt-support.patch b/target/linux/airoha/patches-6.6/901-snand-mtk-bmt-support.patch
deleted file mode 100644 (file)
index 90619d9..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
---- a/drivers/mtd/nand/spi/core.c
-+++ b/drivers/mtd/nand/spi/core.c
-@@ -19,6 +19,7 @@
- #include <linux/string.h>
- #include <linux/spi/spi.h>
- #include <linux/spi/spi-mem.h>
-+#include <linux/mtd/mtk_bmt.h>
- static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
- {
-@@ -1352,6 +1353,7 @@ static int spinand_probe(struct spi_mem
-       if (ret)
-               return ret;
-+      mtk_bmt_attach(mtd);
-       ret = mtd_device_register(mtd, NULL, 0);
-       if (ret)
-               goto err_spinand_cleanup;
-@@ -1359,6 +1361,7 @@ static int spinand_probe(struct spi_mem
-       return 0;
- err_spinand_cleanup:
-+      mtk_bmt_detach(mtd);
-       spinand_cleanup(spinand);
-       return ret;
-@@ -1377,6 +1380,7 @@ static int spinand_remove(struct spi_mem
-       if (ret)
-               return ret;
-+      mtk_bmt_detach(mtd);
-       spinand_cleanup(spinand);
-       return 0;