+++ /dev/null
-CONFIG_64BIT=y
-CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
-CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
-CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_ARCH_FORCE_MAX_ORDER=10
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_KEEP_MEMBLOCK=y
-CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
-CONFIG_ARCH_MMAP_RND_BITS=18
-CONFIG_ARCH_MMAP_RND_BITS_MAX=24
-CONFIG_ARCH_MMAP_RND_BITS_MIN=18
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_STACKWALK=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_WANTS_NO_INSTR=y
-CONFIG_ARCH_WANTS_THP_SWAP=y
-CONFIG_ARM64=y
-CONFIG_ARM64_4K_PAGES=y
-CONFIG_ARM64_ERRATUM_1165522=y
-CONFIG_ARM64_ERRATUM_1286807=y
-CONFIG_ARM64_ERRATUM_2051678=y
-CONFIG_ARM64_ERRATUM_2054223=y
-CONFIG_ARM64_ERRATUM_2067961=y
-CONFIG_ARM64_ERRATUM_2077057=y
-CONFIG_ARM64_ERRATUM_2658417=y
-CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
-CONFIG_ARM64_PAGE_SHIFT=12
-CONFIG_ARM64_PA_BITS=48
-CONFIG_ARM64_PA_BITS_48=y
-CONFIG_ARM64_PTR_AUTH=y
-CONFIG_ARM64_PTR_AUTH_KERNEL=y
-CONFIG_ARM64_SME=y
-CONFIG_ARM64_SVE=y
-CONFIG_ARM64_TAGGED_ADDR_ABI=y
-CONFIG_ARM64_VA_BITS=39
-CONFIG_ARM64_VA_BITS_39=y
-CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y
-CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y
-CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y
-CONFIG_ARM_AMBA=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_GIC_V2M=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-CONFIG_ARM_GIC_V3_ITS_PCI=y
-# CONFIG_ARM_MHU_V2 is not set
-CONFIG_ARM_PSCI_CPUIDLE=y
-CONFIG_ARM_PSCI_FW=y
-# CONFIG_ARM_QCOM_CPUFREQ_HW is not set
-CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y
-CONFIG_AT803X_PHY=y
-CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_PM=y
-CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
-CONFIG_CAVIUM_TX2_ERRATUM_219=y
-CONFIG_CC_HAVE_SHADOW_CALL_STACK=y
-CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_COMMON_CLK=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
-# CONFIG_COMPAT_32BIT_TIME is not set
-CONFIG_CONTEXT_TRACKING=y
-CONFIG_CONTEXT_TRACKING_IDLE=y
-CONFIG_COREDUMP=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_CPUFREQ_DT_PLATDEV=y
-CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_THERMAL=y
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_PM=y
-CONFIG_CPU_RMAP=y
-CONFIG_CPU_THERMAL=y
-CONFIG_CRC16=y
-CONFIG_CRC8=y
-CONFIG_CRYPTO_AUTHENC=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_DEV_QCE=y
-CONFIG_CRYPTO_DEV_QCE_AEAD=y
-# CONFIG_CRYPTO_DEV_QCE_ENABLE_AEAD is not set
-CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL=y
-# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set
-# CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER is not set
-CONFIG_CRYPTO_DEV_QCE_SHA=y
-CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y
-CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN=512
-CONFIG_CRYPTO_DEV_QCOM_RNG=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
-CONFIG_CRYPTO_LIB_DES=y
-CONFIG_CRYPTO_LIB_GF128MUL=y
-CONFIG_CRYPTO_LIB_SHA1=y
-CONFIG_CRYPTO_LIB_SHA256=y
-CONFIG_CRYPTO_LIB_UTILS=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_XTS=y
-CONFIG_CRYPTO_ZSTD=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEV_COREDUMP=y
-CONFIG_DMADEVICES=y
-CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
-CONFIG_DMA_DIRECT_REMAP=y
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DTC=y
-CONFIG_DT_IDLE_STATES=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EXCLUSIVE_SYSTEM_RAM=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FRAME_POINTER=y
-CONFIG_FS_IOMAP=y
-CONFIG_FUJITSU_ERRATUM_010001=y
-CONFIG_FUNCTION_ALIGNMENT=4
-CONFIG_FUNCTION_ALIGNMENT_4B=y
-CONFIG_FWNODE_MDIO=y
-CONFIG_FW_LOADER_PAGED_BUF=y
-CONFIG_FW_LOADER_SYSFS=y
-CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y
-CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_ARCH_TOPOLOGY=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_GENERIC_CSUM=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_GETTIMEOFDAY=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IOREMAP=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
-CONFIG_GENERIC_PINCTRL_GROUPS=y
-CONFIG_GENERIC_PINMUX_FUNCTIONS=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GLOB=y
-CONFIG_GPIOLIB_IRQCHIP=y
-CONFIG_GPIO_CDEV=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HWSPINLOCK=y
-CONFIG_HWSPINLOCK_QCOM=y
-CONFIG_I2C=y
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_HELPER_AUTO=y
-# CONFIG_I2C_QCOM_CCI is not set
-CONFIG_I2C_QUP=y
-CONFIG_IIO=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_IPQ_APSS_6018=y
-CONFIG_IPQ_APSS_PLL=y
-# CONFIG_IPQ_GCC_4019 is not set
-# CONFIG_IPQ_GCC_5018 is not set
-# CONFIG_IPQ_GCC_5332 is not set
-# CONFIG_IPQ_GCC_6018 is not set
-# CONFIG_IPQ_GCC_8074 is not set
-# CONFIG_IPQ_GCC_9574 is not set
-CONFIG_IRQCHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_WORK=y
-# CONFIG_KPSS_XCC is not set
-CONFIG_LEDS_TLC591XX=y
-CONFIG_LIBFDT=y
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MAILBOX=y
-# CONFIG_MAILBOX_TEST is not set
-CONFIG_MDIO_BUS=y
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_DEVRES=y
-CONFIG_MDIO_IPQ4019=y
-# CONFIG_MFD_QCOM_RPM is not set
-CONFIG_MFD_SYSCON=y
-CONFIG_MIGRATION=y
-# CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is not set
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_BLOCK_MINORS=32
-CONFIG_MMC_CQHCI=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_IO_ACCESSORS=y
-CONFIG_MMC_SDHCI_MSM=y
-# CONFIG_MMC_SDHCI_PCI is not set
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMU_LAZY_TLB_REFCOUNT=y
-CONFIG_MODULES_USE_ELF_RELA=y
-# CONFIG_MSM_GCC_8916 is not set
-# CONFIG_MSM_GCC_8917 is not set
-# CONFIG_MSM_GCC_8939 is not set
-# CONFIG_MSM_GCC_8976 is not set
-# CONFIG_MSM_GCC_8994 is not set
-# CONFIG_MSM_GCC_8996 is not set
-# CONFIG_MSM_GCC_8998 is not set
-# CONFIG_MSM_GPUCC_8998 is not set
-# CONFIG_MSM_MMCC_8996 is not set
-# CONFIG_MSM_MMCC_8998 is not set
-CONFIG_MTD_NAND_CORE=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING=y
-CONFIG_MTD_NAND_QCOM=y
-CONFIG_MTD_QCOMSMEM_PARTS=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_BLOCK=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_NET_EGRESS=y
-CONFIG_NET_FLOW_LIMIT=y
-CONFIG_NET_INGRESS=y
-CONFIG_NET_SELFTESTS=y
-CONFIG_NET_XGRESS=y
-CONFIG_NLS=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=4
-CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y
-CONFIG_NVMEM=y
-CONFIG_NVMEM_LAYOUTS=y
-CONFIG_NVMEM_QCOM_QFPROM=y
-# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set
-CONFIG_NVMEM_SYSFS=y
-CONFIG_NVMEM_U_BOOT_ENV=y
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_KOBJ=y
-CONFIG_OF_MDIO=y
-CONFIG_PADATA=y
-CONFIG_PAGE_POOL=y
-CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
-CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
-CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y
-CONFIG_PARTITION_PERCPU=y
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_DW=y
-CONFIG_PCIE_DW_HOST=y
-CONFIG_PCIE_PME=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_MSI=y
-CONFIG_PER_VMA_LOCK=y
-CONFIG_PGTABLE_LEVELS=3
-CONFIG_PHYLIB=y
-CONFIG_PHYLIB_LEDS=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-# CONFIG_PHY_QCOM_APQ8064_SATA is not set
-# CONFIG_PHY_QCOM_EDP is not set
-# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set
-# CONFIG_PHY_QCOM_IPQ4019_USB is not set
-# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
-# CONFIG_PHY_QCOM_IPQ806X_USB is not set
-# CONFIG_PHY_QCOM_M31_USB is not set
-# CONFIG_PHY_QCOM_PCIE2 is not set
-CONFIG_PHY_QCOM_QMP=y
-CONFIG_PHY_QCOM_QMP_COMBO=y
-CONFIG_PHY_QCOM_QMP_PCIE=y
-CONFIG_PHY_QCOM_QMP_PCIE_8996=y
-CONFIG_PHY_QCOM_QMP_UFS=y
-CONFIG_PHY_QCOM_QMP_USB=y
-# CONFIG_PHY_QCOM_QMP_USB_LEGACY is not set
-CONFIG_PHY_QCOM_QUSB2=y
-# CONFIG_PHY_QCOM_SGMII_ETH is not set
-# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set
-# CONFIG_PHY_QCOM_USB_HS_28NM is not set
-# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
-# CONFIG_PHY_QCOM_USB_SS is not set
-CONFIG_PINCTRL=y
-# CONFIG_PINCTRL_IPQ5018 is not set
-# CONFIG_PINCTRL_IPQ5332 is not set
-# CONFIG_PINCTRL_IPQ6018 is not set
-# CONFIG_PINCTRL_IPQ8074 is not set
-# CONFIG_PINCTRL_IPQ9574 is not set
-CONFIG_PINCTRL_MSM=y
-# CONFIG_PINCTRL_MSM8916 is not set
-# CONFIG_PINCTRL_MSM8976 is not set
-# CONFIG_PINCTRL_MSM8994 is not set
-# CONFIG_PINCTRL_MSM8996 is not set
-# CONFIG_PINCTRL_MSM8998 is not set
-# CONFIG_PINCTRL_QCM2290 is not set
-# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
-# CONFIG_PINCTRL_QCS404 is not set
-# CONFIG_PINCTRL_QDU1000 is not set
-# CONFIG_PINCTRL_SA8775P is not set
-# CONFIG_PINCTRL_SC7180 is not set
-# CONFIG_PINCTRL_SC8280XP is not set
-# CONFIG_PINCTRL_SDM660 is not set
-# CONFIG_PINCTRL_SDM670 is not set
-# CONFIG_PINCTRL_SDM845 is not set
-# CONFIG_PINCTRL_SDX75 is not set
-# CONFIG_PINCTRL_SM6350 is not set
-# CONFIG_PINCTRL_SM6375 is not set
-# CONFIG_PINCTRL_SM7150 is not set
-# CONFIG_PINCTRL_SM8150 is not set
-# CONFIG_PINCTRL_SM8250 is not set
-# CONFIG_PINCTRL_SM8450 is not set
-# CONFIG_PINCTRL_SM8550 is not set
-CONFIG_PM=y
-CONFIG_PM_CLK=y
-CONFIG_PM_OPP=y
-CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
-CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_MSM is not set
-CONFIG_POWER_SUPPLY=y
-CONFIG_PREEMPT_NONE_BUILD=y
-CONFIG_PRINTK_TIME=y
-CONFIG_PTP_1588_CLOCK_OPTIONAL=y
-CONFIG_QCA807X_PHY=y
-CONFIG_QCA808X_PHY=y
-# CONFIG_QCM_DISPCC_2290 is not set
-# CONFIG_QCM_GCC_2290 is not set
-# CONFIG_QCOM_A53PLL is not set
-# CONFIG_QCOM_AOSS_QMP is not set
-CONFIG_QCOM_APCS_IPC=y
-# CONFIG_QCOM_APM is not set
-# CONFIG_QCOM_APR is not set
-CONFIG_QCOM_BAM_DMA=y
-# CONFIG_QCOM_CLK_APCC_MSM8996 is not set
-# CONFIG_QCOM_CLK_APCS_MSM8916 is not set
-# CONFIG_QCOM_COMMAND_DB is not set
-# CONFIG_QCOM_CPR is not set
-# CONFIG_QCOM_EBI2 is not set
-# CONFIG_QCOM_FASTRPC is not set
-# CONFIG_QCOM_GENI_SE is not set
-# CONFIG_QCOM_GSBI is not set
-# CONFIG_QCOM_HFPLL is not set
-# CONFIG_QCOM_ICC_BWMON is not set
-# CONFIG_QCOM_IPA is not set
-# CONFIG_QCOM_IPCC is not set
-# CONFIG_QCOM_LLCC is not set
-CONFIG_QCOM_MDT_LOADER=y
-# CONFIG_QCOM_MPM is not set
-CONFIG_QCOM_NET_PHYLIB=y
-# CONFIG_QCOM_OCMEM is not set
-# CONFIG_QCOM_PDC is not set
-CONFIG_QCOM_PIL_INFO=y
-# CONFIG_QCOM_Q6V5_ADSP is not set
-CONFIG_QCOM_Q6V5_COMMON=y
-# CONFIG_QCOM_Q6V5_MSS is not set
-# CONFIG_QCOM_Q6V5_PAS is not set
-CONFIG_QCOM_Q6V5_WCSS=y
-# CONFIG_QCOM_RAMP_CTRL is not set
-# CONFIG_QCOM_RMTFS_MEM is not set
-# CONFIG_QCOM_RPMH is not set
-# CONFIG_QCOM_RPM_MASTER_STATS is not set
-CONFIG_QCOM_RPROC_COMMON=y
-CONFIG_QCOM_SCM=y
-# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
-# CONFIG_QCOM_SMD_RPM is not set
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMEM_STATE=y
-CONFIG_QCOM_SMP2P=y
-# CONFIG_QCOM_SMSM is not set
-CONFIG_QCOM_SOCINFO=y
-# CONFIG_QCOM_SPM is not set
-# CONFIG_QCOM_STATS is not set
-# CONFIG_QCOM_SYSMON is not set
-CONFIG_QCOM_TSENS=y
-# CONFIG_QCOM_WCNSS_CTRL is not set
-# CONFIG_QCOM_WCNSS_PIL is not set
-CONFIG_QCOM_WDT=y
-# CONFIG_QCS_GCC_404 is not set
-# CONFIG_QCS_Q6SSTOP_404 is not set
-# CONFIG_QCS_TURING_404 is not set
-# CONFIG_QDU_GCC_1000 is not set
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-CONFIG_RANDSTRUCT_NONE=y
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_I2C=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGULATOR=y
-# CONFIG_REGULATOR_CPR3 is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-# CONFIG_REGULATOR_QCOM_REFGEN is not set
-# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set
-CONFIG_RELOCATABLE=y
-CONFIG_REMOTEPROC=y
-CONFIG_REMOTEPROC_CDEV=y
-CONFIG_RESET_CONTROLLER=y
-# CONFIG_RESET_QCOM_AOSS is not set
-# CONFIG_RESET_QCOM_PDC is not set
-CONFIG_RFS_ACCEL=y
-CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_RPMSG=y
-CONFIG_RPMSG_CHAR=y
-# CONFIG_RPMSG_CTRL is not set
-# CONFIG_RPMSG_NS is not set
-CONFIG_RPMSG_QCOM_GLINK=y
-CONFIG_RPMSG_QCOM_GLINK_RPM=y
-CONFIG_RPMSG_QCOM_GLINK_SMEM=y
-CONFIG_RPMSG_QCOM_SMD=y
-# CONFIG_RPMSG_TTY is not set
-CONFIG_RPS=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_I2C_AND_SPI=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-# CONFIG_SA_GCC_8775P is not set
-# CONFIG_SA_GPUCC_8775P is not set
-# CONFIG_SCHED_CORE is not set
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_SMT=y
-CONFIG_SCHED_THERMAL_PRESSURE=y
-CONFIG_SCSI=y
-CONFIG_SCSI_COMMON=y
-# CONFIG_SCSI_LOWLEVEL is not set
-# CONFIG_SCSI_PROC_FS is not set
-# CONFIG_SC_CAMCC_7280 is not set
-# CONFIG_SC_DISPCC_7180 is not set
-# CONFIG_SC_DISPCC_8280XP is not set
-# CONFIG_SC_GCC_7180 is not set
-# CONFIG_SC_GCC_8280XP is not set
-# CONFIG_SC_GPUCC_7180 is not set
-# CONFIG_SC_LPASSCC_7280 is not set
-# CONFIG_SC_LPASSCC_8280XP is not set
-# CONFIG_SC_LPASS_CORECC_7180 is not set
-# CONFIG_SC_LPASS_CORECC_7280 is not set
-# CONFIG_SC_MSS_7180 is not set
-# CONFIG_SC_VIDEOCC_7180 is not set
-# CONFIG_SDM_CAMCC_845 is not set
-# CONFIG_SDM_DISPCC_845 is not set
-# CONFIG_SDM_GCC_660 is not set
-# CONFIG_SDM_GCC_845 is not set
-# CONFIG_SDM_GPUCC_845 is not set
-# CONFIG_SDM_LPASSCC_845 is not set
-# CONFIG_SDM_VIDEOCC_845 is not set
-# CONFIG_SDX_GCC_75 is not set
-CONFIG_SERIAL_8250_FSL=y
-CONFIG_SERIAL_MCTRL_GPIO=y
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SGL_ALLOC=y
-CONFIG_SG_POOL=y
-CONFIG_SMP=y
-# CONFIG_SM_CAMCC_6350 is not set
-# CONFIG_SM_CAMCC_8450 is not set
-# CONFIG_SM_GCC_7150 is not set
-# CONFIG_SM_GCC_8150 is not set
-# CONFIG_SM_GCC_8250 is not set
-# CONFIG_SM_GCC_8450 is not set
-# CONFIG_SM_GCC_8550 is not set
-# CONFIG_SM_GPUCC_6115 is not set
-# CONFIG_SM_GPUCC_6125 is not set
-# CONFIG_SM_GPUCC_6350 is not set
-# CONFIG_SM_GPUCC_6375 is not set
-# CONFIG_SM_GPUCC_8150 is not set
-# CONFIG_SM_GPUCC_8250 is not set
-# CONFIG_SM_GPUCC_8350 is not set
-# CONFIG_SM_GPUCC_8450 is not set
-# CONFIG_SM_GPUCC_8550 is not set
-# CONFIG_SM_TCSRCC_8550 is not set
-# CONFIG_SM_VIDEOCC_8150 is not set
-# CONFIG_SM_VIDEOCC_8250 is not set
-# CONFIG_SM_VIDEOCC_8350 is not set
-# CONFIG_SM_VIDEOCC_8450 is not set
-# CONFIG_SM_VIDEOCC_8550 is not set
-CONFIG_SOCK_RX_QUEUE_MAPPING=y
-CONFIG_SOC_BUS=y
-CONFIG_SOFTIRQ_ON_OWN_STACK=y
-CONFIG_SPARSEMEM=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_SPI=y
-CONFIG_SPI_MASTER=y
-CONFIG_SPI_MEM=y
-CONFIG_SPI_QUP=y
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SWIOTLB=y
-CONFIG_SWPHY=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_THERMAL=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_OF=y
-CONFIG_THREAD_INFO_IN_TASK=y
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
-CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
-CONFIG_TREE_RCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_ADVANCED_COMPR=y
-# CONFIG_UCLAMP_TASK is not set
-CONFIG_UNMAP_KERNEL_AT_EL0=y
-CONFIG_USB=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_SUPPORT=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_ANCHOR=y
-# CONFIG_VIRTIO_BLK is not set
-# CONFIG_VIRTIO_NET is not set
-CONFIG_VMAP_STACK=y
-CONFIG_WANT_DEV_COREDUMP=y
-CONFIG_WATCHDOG_CORE=y
-CONFIG_WATCHDOG_SYSFS=y
-CONFIG_XPS=y
-CONFIG_XXHASH=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZONE_DMA32=y
-CONFIG_ZSTD_COMMON=y
-CONFIG_ZSTD_COMPRESS=y
-CONFIG_ZSTD_DECOMPRESS=y
+++ /dev/null
-From 0e8527d076cfb3fa55777a2ece735852fcf3e850 Mon Sep 17 00:00:00 2001
-From: Anusha Rao <quic_anusha@quicinc.com>
-Date: Wed, 27 Sep 2023 12:13:18 +0530
-Subject: [PATCH] arm64: dts: qcom: ipq9574: Add common RDP dtsi file
-
-Add a dtsi file to include interfaces that are common
-across RDPs.
-
-Signed-off-by: Anusha Rao <quic_anusha@quicinc.com>
-Signed-off-by: Kathiravan Thirumoorthy <quic_kathirav@quicinc.com>
-Link: https://lore.kernel.org/r/20230927-common-rdp-v3-1-3d07b3ff6d42@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 125 ++++++++++++++++++
- arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts | 63 +--------
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 91 +------------
- arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts | 65 +--------
- arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts | 65 +--------
- arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts | 66 +--------
- 6 files changed, 130 insertions(+), 345 deletions(-)
- create mode 100644 arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-
---- /dev/null
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -0,0 +1,125 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * IPQ9574 RDP board common device tree source
-+ *
-+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/dts-v1/;
-+
-+#include "ipq9574.dtsi"
-+
-+/ {
-+ aliases {
-+ serial0 = &blsp1_uart2;
-+ };
-+
-+ chosen {
-+ stdout-path = "serial0:115200n8";
-+ };
-+
-+ regulator_fixed_3p3: s3300 {
-+ compatible = "regulator-fixed";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ regulator-boot-on;
-+ regulator-always-on;
-+ regulator-name = "fixed_3p3";
-+ };
-+
-+ regulator_fixed_0p925: s0925 {
-+ compatible = "regulator-fixed";
-+ regulator-min-microvolt = <925000>;
-+ regulator-max-microvolt = <925000>;
-+ regulator-boot-on;
-+ regulator-always-on;
-+ regulator-name = "fixed_0p925";
-+ };
-+};
-+
-+&blsp1_spi0 {
-+ pinctrl-0 = <&spi_0_pins>;
-+ pinctrl-names = "default";
-+ status = "okay";
-+
-+ flash@0 {
-+ compatible = "micron,n25q128a11", "jedec,spi-nor";
-+ reg = <0>;
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <50000000>;
-+ };
-+};
-+
-+&blsp1_uart2 {
-+ pinctrl-0 = <&uart2_pins>;
-+ pinctrl-names = "default";
-+ status = "okay";
-+};
-+
-+&rpm_requests {
-+ regulators {
-+ compatible = "qcom,rpm-mp5496-regulators";
-+
-+ ipq9574_s1: s1 {
-+ /*
-+ * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-+ * During regulator registration, kernel not knowing the initial voltage,
-+ * considers it as zero and brings up the regulators with minimum supported voltage.
-+ * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-+ * the regulators are brought up with 725mV which is sufficient for all the
-+ * corner parts to operate at 800MHz
-+ */
-+ regulator-min-microvolt = <725000>;
-+ regulator-max-microvolt = <1075000>;
-+ };
-+
-+ mp5496_l2: l2 {
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
-+ regulator-always-on;
-+ regulator-boot-on;
-+ };
-+ };
-+};
-+
-+&sleep_clk {
-+ clock-frequency = <32000>;
-+};
-+
-+&tlmm {
-+ spi_0_pins: spi-0-state {
-+ pins = "gpio11", "gpio12", "gpio13", "gpio14";
-+ function = "blsp0_spi";
-+ drive-strength = <8>;
-+ bias-disable;
-+ };
-+};
-+
-+&usb_0_dwc3 {
-+ dr_mode = "host";
-+};
-+
-+&usb_0_qmpphy {
-+ vdda-pll-supply = <&mp5496_l2>;
-+ vdda-phy-supply = <&regulator_fixed_0p925>;
-+
-+ status = "okay";
-+};
-+
-+&usb_0_qusbphy {
-+ vdd-supply = <&regulator_fixed_0p925>;
-+ vdda-pll-supply = <&mp5496_l2>;
-+ vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
-+
-+ status = "okay";
-+};
-+
-+&usb3 {
-+ status = "okay";
-+};
-+
-+&xo_board_clk {
-+ clock-frequency = <24000000>;
-+};
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts
-@@ -8,58 +8,12 @@
-
- /dts-v1/;
-
--#include "ipq9574.dtsi"
-+#include "ipq9574-rdp-common.dtsi"
-
- / {
- model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C2";
- compatible = "qcom,ipq9574-ap-al02-c2", "qcom,ipq9574";
-
-- aliases {
-- serial0 = &blsp1_uart2;
-- };
--
-- chosen {
-- stdout-path = "serial0:115200n8";
-- };
--};
--
--&blsp1_spi0 {
-- pinctrl-0 = <&spi_0_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--
-- flash@0 {
-- compatible = "micron,n25q128a11", "jedec,spi-nor";
-- reg = <0>;
-- #address-cells = <1>;
-- #size-cells = <1>;
-- spi-max-frequency = <50000000>;
-- };
--};
--
--&blsp1_uart2 {
-- pinctrl-0 = <&uart2_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--};
--
--&rpm_requests {
-- regulators {
-- compatible = "qcom,rpm-mp5496-regulators";
--
-- ipq9574_s1: s1 {
-- /*
-- * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-- * During regulator registration, kernel not knowing the initial voltage,
-- * considers it as zero and brings up the regulators with minimum supported voltage.
-- * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-- * the regulators are brought up with 725mV which is sufficient for all the
-- * corner parts to operate at 800MHz
-- */
-- regulator-min-microvolt = <725000>;
-- regulator-max-microvolt = <1075000>;
-- };
-- };
- };
-
- &sdhc_1 {
-@@ -74,10 +28,6 @@
- status = "okay";
- };
-
--&sleep_clk {
-- clock-frequency = <32000>;
--};
--
- &tlmm {
- sdc_default_state: sdc-default-state {
- clk-pins {
-@@ -110,15 +60,4 @@
- bias-pull-down;
- };
- };
--
-- spi_0_pins: spi-0-state {
-- pins = "gpio11", "gpio12", "gpio13", "gpio14";
-- function = "blsp0_spi";
-- drive-strength = <8>;
-- bias-disable;
-- };
--};
--
--&xo_board_clk {
-- clock-frequency = <24000000>;
- };
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -8,69 +8,11 @@
-
- /dts-v1/;
-
--#include "ipq9574.dtsi"
-+#include "ipq9574-rdp-common.dtsi"
-
- / {
- model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C7";
- compatible = "qcom,ipq9574-ap-al02-c7", "qcom,ipq9574";
--
-- aliases {
-- serial0 = &blsp1_uart2;
-- };
--
-- chosen {
-- stdout-path = "serial0:115200n8";
-- };
--
-- regulator_fixed_3p3: s3300 {
-- compatible = "regulator-fixed";
-- regulator-min-microvolt = <3300000>;
-- regulator-max-microvolt = <3300000>;
-- regulator-boot-on;
-- regulator-always-on;
-- regulator-name = "fixed_3p3";
-- };
--
-- regulator_fixed_0p925: s0925 {
-- compatible = "regulator-fixed";
-- regulator-min-microvolt = <925000>;
-- regulator-max-microvolt = <925000>;
-- regulator-boot-on;
-- regulator-always-on;
-- regulator-name = "fixed_0p925";
-- };
--};
--
--&blsp1_uart2 {
-- pinctrl-0 = <&uart2_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--};
--
--&rpm_requests {
-- regulators {
-- compatible = "qcom,rpm-mp5496-regulators";
--
-- ipq9574_s1: s1 {
-- /*
-- * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-- * During regulator registration, kernel not knowing the initial voltage,
-- * considers it as zero and brings up the regulators with minimum supported voltage.
-- * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-- * the regulators are brought up with 725mV which is sufficient for all the
-- * corner parts to operate at 800MHz
-- */
-- regulator-min-microvolt = <725000>;
-- regulator-max-microvolt = <1075000>;
-- };
--
-- mp5496_l2: l2 {
-- regulator-min-microvolt = <1800000>;
-- regulator-max-microvolt = <1800000>;
-- regulator-always-on;
-- regulator-boot-on;
-- };
-- };
- };
-
- &sdhc_1 {
-@@ -85,10 +27,6 @@
- status = "okay";
- };
-
--&sleep_clk {
-- clock-frequency = <32000>;
--};
--
- &tlmm {
- sdc_default_state: sdc-default-state {
- clk-pins {
-@@ -122,30 +60,3 @@
- };
- };
- };
--
--&usb_0_dwc3 {
-- dr_mode = "host";
--};
--
--&usb_0_qmpphy {
-- vdda-pll-supply = <&mp5496_l2>;
-- vdda-phy-supply = <&regulator_fixed_0p925>;
--
-- status = "okay";
--};
--
--&usb_0_qusbphy {
-- vdd-supply = <&regulator_fixed_0p925>;
-- vdda-pll-supply = <&mp5496_l2>;
-- vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
--
-- status = "okay";
--};
--
--&usb3 {
-- status = "okay";
--};
--
--&xo_board_clk {
-- clock-frequency = <24000000>;
--};
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts
-@@ -8,73 +8,10 @@
-
- /dts-v1/;
-
--#include "ipq9574.dtsi"
-+#include "ipq9574-rdp-common.dtsi"
-
- / {
- model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C6";
- compatible = "qcom,ipq9574-ap-al02-c6", "qcom,ipq9574";
-
-- aliases {
-- serial0 = &blsp1_uart2;
-- };
--
-- chosen {
-- stdout-path = "serial0:115200n8";
-- };
--};
--
--&blsp1_spi0 {
-- pinctrl-0 = <&spi_0_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--
-- flash@0 {
-- compatible = "micron,n25q128a11", "jedec,spi-nor";
-- reg = <0>;
-- #address-cells = <1>;
-- #size-cells = <1>;
-- spi-max-frequency = <50000000>;
-- };
--};
--
--&blsp1_uart2 {
-- pinctrl-0 = <&uart2_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--};
--
--&rpm_requests {
-- regulators {
-- compatible = "qcom,rpm-mp5496-regulators";
--
-- ipq9574_s1: s1 {
-- /*
-- * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-- * During regulator registration, kernel not knowing the initial voltage,
-- * considers it as zero and brings up the regulators with minimum supported voltage.
-- * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-- * the regulators are brought up with 725mV which is sufficient for all the
-- * corner parts to operate at 800MHz
-- */
-- regulator-min-microvolt = <725000>;
-- regulator-max-microvolt = <1075000>;
-- };
-- };
--};
--
--&sleep_clk {
-- clock-frequency = <32000>;
--};
--
--&tlmm {
-- spi_0_pins: spi-0-state {
-- pins = "gpio11", "gpio12", "gpio13", "gpio14";
-- function = "blsp0_spi";
-- drive-strength = <8>;
-- bias-disable;
-- };
--};
--
--&xo_board_clk {
-- clock-frequency = <24000000>;
- };
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts
-@@ -8,73 +8,10 @@
-
- /dts-v1/;
-
--#include "ipq9574.dtsi"
-+#include "ipq9574-rdp-common.dtsi"
-
- / {
- model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C8";
- compatible = "qcom,ipq9574-ap-al02-c8", "qcom,ipq9574";
-
-- aliases {
-- serial0 = &blsp1_uart2;
-- };
--
-- chosen {
-- stdout-path = "serial0:115200n8";
-- };
--};
--
--&blsp1_spi0 {
-- pinctrl-0 = <&spi_0_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--
-- flash@0 {
-- compatible = "micron,n25q128a11", "jedec,spi-nor";
-- reg = <0>;
-- #address-cells = <1>;
-- #size-cells = <1>;
-- spi-max-frequency = <50000000>;
-- };
--};
--
--&blsp1_uart2 {
-- pinctrl-0 = <&uart2_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--};
--
--&rpm_requests {
-- regulators {
-- compatible = "qcom,rpm-mp5496-regulators";
--
-- ipq9574_s1: s1 {
-- /*
-- * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-- * During regulator registration, kernel not knowing the initial voltage,
-- * considers it as zero and brings up the regulators with minimum supported voltage.
-- * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-- * the regulators are brought up with 725mV which is sufficient for all the
-- * corner parts to operate at 800MHz
-- */
-- regulator-min-microvolt = <725000>;
-- regulator-max-microvolt = <1075000>;
-- };
-- };
--};
--
--&sleep_clk {
-- clock-frequency = <32000>;
--};
--
--&tlmm {
-- spi_0_pins: spi-0-state {
-- pins = "gpio11", "gpio12", "gpio13", "gpio14";
-- function = "blsp0_spi";
-- drive-strength = <8>;
-- bias-disable;
-- };
--};
--
--&xo_board_clk {
-- clock-frequency = <24000000>;
- };
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts
-@@ -8,73 +8,9 @@
-
- /dts-v1/;
-
--#include "ipq9574.dtsi"
-+#include "ipq9574-rdp-common.dtsi"
-
- / {
- model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C9";
- compatible = "qcom,ipq9574-ap-al02-c9", "qcom,ipq9574";
--
-- aliases {
-- serial0 = &blsp1_uart2;
-- };
--
-- chosen {
-- stdout-path = "serial0:115200n8";
-- };
--};
--
--&blsp1_spi0 {
-- pinctrl-0 = <&spi_0_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--
-- flash@0 {
-- compatible = "micron,n25q128a11", "jedec,spi-nor";
-- reg = <0>;
-- #address-cells = <1>;
-- #size-cells = <1>;
-- spi-max-frequency = <50000000>;
-- };
--};
--
--&blsp1_uart2 {
-- pinctrl-0 = <&uart2_pins>;
-- pinctrl-names = "default";
-- status = "okay";
--};
--
--&rpm_requests {
-- regulators {
-- compatible = "qcom,rpm-mp5496-regulators";
--
-- ipq9574_s1: s1 {
-- /*
-- * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
-- * During regulator registration, kernel not knowing the initial voltage,
-- * considers it as zero and brings up the regulators with minimum supported voltage.
-- * Update the regulator-min-microvolt with SVS voltage of 725mV so that
-- * the regulators are brought up with 725mV which is sufficient for all the
-- * corner parts to operate at 800MHz
-- */
-- regulator-min-microvolt = <725000>;
-- regulator-max-microvolt = <1075000>;
-- };
-- };
--};
--
--&sleep_clk {
-- clock-frequency = <32000>;
--};
--
--&tlmm {
-- spi_0_pins: spi-0-state {
-- pins = "gpio11", "gpio12", "gpio13", "gpio14";
-- function = "blsp0_spi";
-- drive-strength = <8>;
-- bias-disable;
-- };
--};
--
--&xo_board_clk {
-- clock-frequency = <24000000>;
- };
+++ /dev/null
-From 80bbd1c355d661678d2a25bd36e739b6925e7a4e Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 5 Jun 2024 20:45:39 +0800
-Subject: [PATCH] dt-bindings: clock: add qca8386/qca8084 clock and reset
- definitions
-
-QCA8386/QCA8084 includes the clock & reset controller that is
-accessed by MDIO bus. Two work modes are supported, qca8386 works
-as switch mode, qca8084 works as PHY mode.
-
-Reviewed-by: Rob Herring <robh@kernel.org>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Link: https://lore.kernel.org/r/20240605124541.2711467-3-quic_luoj@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- .../bindings/clock/qcom,qca8k-nsscc.yaml | 86 +++++++++++++++
- include/dt-bindings/clock/qcom,qca8k-nsscc.h | 101 ++++++++++++++++++
- include/dt-bindings/reset/qcom,qca8k-nsscc.h | 76 +++++++++++++
- 3 files changed, 263 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml
- create mode 100644 include/dt-bindings/clock/qcom,qca8k-nsscc.h
- create mode 100644 include/dt-bindings/reset/qcom,qca8k-nsscc.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml
-@@ -0,0 +1,86 @@
-+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/clock/qcom,qca8k-nsscc.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Qualcomm NSS Clock & Reset Controller on QCA8386/QCA8084
-+
-+maintainers:
-+ - Bjorn Andersson <andersson@kernel.org>
-+ - Luo Jie <quic_luoj@quicinc.com>
-+
-+description: |
-+ Qualcomm NSS clock control module provides the clocks and resets
-+ on QCA8386(switch mode)/QCA8084(PHY mode)
-+
-+ See also::
-+ include/dt-bindings/clock/qcom,qca8k-nsscc.h
-+ include/dt-bindings/reset/qcom,qca8k-nsscc.h
-+
-+properties:
-+ compatible:
-+ oneOf:
-+ - const: qcom,qca8084-nsscc
-+ - items:
-+ - enum:
-+ - qcom,qca8082-nsscc
-+ - qcom,qca8085-nsscc
-+ - qcom,qca8384-nsscc
-+ - qcom,qca8385-nsscc
-+ - qcom,qca8386-nsscc
-+ - const: qcom,qca8084-nsscc
-+
-+ clocks:
-+ items:
-+ - description: Chip reference clock source
-+ - description: UNIPHY0 RX 312P5M/125M clock source
-+ - description: UNIPHY0 TX 312P5M/125M clock source
-+ - description: UNIPHY1 RX 312P5M/125M clock source
-+ - description: UNIPHY1 TX 312P5M/125M clock source
-+ - description: UNIPHY1 RX 312P5M clock source
-+ - description: UNIPHY1 TX 312P5M clock source
-+
-+ reg:
-+ items:
-+ - description: MDIO bus address for Clock & Reset Controller register
-+
-+ reset-gpios:
-+ description: GPIO connected to the chip
-+ maxItems: 1
-+
-+required:
-+ - compatible
-+ - clocks
-+ - reg
-+ - reset-gpios
-+
-+allOf:
-+ - $ref: qcom,gcc.yaml#
-+
-+unevaluatedProperties: false
-+
-+examples:
-+ - |
-+ #include <dt-bindings/gpio/gpio.h>
-+ mdio {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ clock-controller@18 {
-+ compatible = "qcom,qca8084-nsscc";
-+ reg = <0x18>;
-+ reset-gpios = <&tlmm 51 GPIO_ACTIVE_LOW>;
-+ clocks = <&pcs0_pll>,
-+ <&qca8k_uniphy0_rx>,
-+ <&qca8k_uniphy0_tx>,
-+ <&qca8k_uniphy1_rx>,
-+ <&qca8k_uniphy1_tx>,
-+ <&qca8k_uniphy1_rx312p5m>,
-+ <&qca8k_uniphy1_tx312p5m>;
-+ #clock-cells = <1>;
-+ #reset-cells = <1>;
-+ #power-domain-cells = <1>;
-+ };
-+ };
-+...
---- /dev/null
-+++ b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
-@@ -0,0 +1,101 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
-+#define _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
-+
-+#define NSS_CC_SWITCH_CORE_CLK_SRC 0
-+#define NSS_CC_SWITCH_CORE_CLK 1
-+#define NSS_CC_APB_BRIDGE_CLK 2
-+#define NSS_CC_MAC0_TX_CLK_SRC 3
-+#define NSS_CC_MAC0_TX_DIV_CLK_SRC 4
-+#define NSS_CC_MAC0_TX_CLK 5
-+#define NSS_CC_MAC0_TX_SRDS1_CLK 6
-+#define NSS_CC_MAC0_RX_CLK_SRC 7
-+#define NSS_CC_MAC0_RX_DIV_CLK_SRC 8
-+#define NSS_CC_MAC0_RX_CLK 9
-+#define NSS_CC_MAC0_RX_SRDS1_CLK 10
-+#define NSS_CC_MAC1_TX_CLK_SRC 11
-+#define NSS_CC_MAC1_TX_DIV_CLK_SRC 12
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC 13
-+#define NSS_CC_MAC1_SRDS1_CH0_RX_CLK 14
-+#define NSS_CC_MAC1_TX_CLK 15
-+#define NSS_CC_MAC1_GEPHY0_TX_CLK 16
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK 17
-+#define NSS_CC_MAC1_RX_CLK_SRC 18
-+#define NSS_CC_MAC1_RX_DIV_CLK_SRC 19
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC 20
-+#define NSS_CC_MAC1_SRDS1_CH0_TX_CLK 21
-+#define NSS_CC_MAC1_RX_CLK 22
-+#define NSS_CC_MAC1_GEPHY0_RX_CLK 23
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK 24
-+#define NSS_CC_MAC2_TX_CLK_SRC 25
-+#define NSS_CC_MAC2_TX_DIV_CLK_SRC 26
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC 27
-+#define NSS_CC_MAC2_SRDS1_CH1_RX_CLK 28
-+#define NSS_CC_MAC2_TX_CLK 29
-+#define NSS_CC_MAC2_GEPHY1_TX_CLK 30
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK 31
-+#define NSS_CC_MAC2_RX_CLK_SRC 32
-+#define NSS_CC_MAC2_RX_DIV_CLK_SRC 33
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC 34
-+#define NSS_CC_MAC2_SRDS1_CH1_TX_CLK 35
-+#define NSS_CC_MAC2_RX_CLK 36
-+#define NSS_CC_MAC2_GEPHY1_RX_CLK 37
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK 38
-+#define NSS_CC_MAC3_TX_CLK_SRC 39
-+#define NSS_CC_MAC3_TX_DIV_CLK_SRC 40
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC 41
-+#define NSS_CC_MAC3_SRDS1_CH2_RX_CLK 42
-+#define NSS_CC_MAC3_TX_CLK 43
-+#define NSS_CC_MAC3_GEPHY2_TX_CLK 44
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK 45
-+#define NSS_CC_MAC3_RX_CLK_SRC 46
-+#define NSS_CC_MAC3_RX_DIV_CLK_SRC 47
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC 48
-+#define NSS_CC_MAC3_SRDS1_CH2_TX_CLK 49
-+#define NSS_CC_MAC3_RX_CLK 50
-+#define NSS_CC_MAC3_GEPHY2_RX_CLK 51
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK 52
-+#define NSS_CC_MAC4_TX_CLK_SRC 53
-+#define NSS_CC_MAC4_TX_DIV_CLK_SRC 54
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC 55
-+#define NSS_CC_MAC4_SRDS1_CH3_RX_CLK 56
-+#define NSS_CC_MAC4_TX_CLK 57
-+#define NSS_CC_MAC4_GEPHY3_TX_CLK 58
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK 59
-+#define NSS_CC_MAC4_RX_CLK_SRC 60
-+#define NSS_CC_MAC4_RX_DIV_CLK_SRC 61
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC 62
-+#define NSS_CC_MAC4_SRDS1_CH3_TX_CLK 63
-+#define NSS_CC_MAC4_RX_CLK 64
-+#define NSS_CC_MAC4_GEPHY3_RX_CLK 65
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK 66
-+#define NSS_CC_MAC5_TX_CLK_SRC 67
-+#define NSS_CC_MAC5_TX_DIV_CLK_SRC 68
-+#define NSS_CC_MAC5_TX_SRDS0_CLK 69
-+#define NSS_CC_MAC5_TX_CLK 70
-+#define NSS_CC_MAC5_RX_CLK_SRC 71
-+#define NSS_CC_MAC5_RX_DIV_CLK_SRC 72
-+#define NSS_CC_MAC5_RX_SRDS0_CLK 73
-+#define NSS_CC_MAC5_RX_CLK 74
-+#define NSS_CC_MAC5_TX_SRDS0_CLK_SRC 75
-+#define NSS_CC_MAC5_RX_SRDS0_CLK_SRC 76
-+#define NSS_CC_AHB_CLK_SRC 77
-+#define NSS_CC_AHB_CLK 78
-+#define NSS_CC_SEC_CTRL_AHB_CLK 79
-+#define NSS_CC_TLMM_CLK 80
-+#define NSS_CC_TLMM_AHB_CLK 81
-+#define NSS_CC_CNOC_AHB_CLK 82
-+#define NSS_CC_MDIO_AHB_CLK 83
-+#define NSS_CC_MDIO_MASTER_AHB_CLK 84
-+#define NSS_CC_SYS_CLK_SRC 85
-+#define NSS_CC_SRDS0_SYS_CLK 86
-+#define NSS_CC_SRDS1_SYS_CLK 87
-+#define NSS_CC_GEPHY0_SYS_CLK 88
-+#define NSS_CC_GEPHY1_SYS_CLK 89
-+#define NSS_CC_GEPHY2_SYS_CLK 90
-+#define NSS_CC_GEPHY3_SYS_CLK 91
-+#endif
---- /dev/null
-+++ b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
-@@ -0,0 +1,76 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
-+#define _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
-+
-+#define NSS_CC_SWITCH_CORE_ARES 1
-+#define NSS_CC_APB_BRIDGE_ARES 2
-+#define NSS_CC_MAC0_TX_ARES 3
-+#define NSS_CC_MAC0_TX_SRDS1_ARES 4
-+#define NSS_CC_MAC0_RX_ARES 5
-+#define NSS_CC_MAC0_RX_SRDS1_ARES 6
-+#define NSS_CC_MAC1_SRDS1_CH0_RX_ARES 7
-+#define NSS_CC_MAC1_TX_ARES 8
-+#define NSS_CC_MAC1_GEPHY0_TX_ARES 9
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES 10
-+#define NSS_CC_MAC1_SRDS1_CH0_TX_ARES 11
-+#define NSS_CC_MAC1_RX_ARES 12
-+#define NSS_CC_MAC1_GEPHY0_RX_ARES 13
-+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES 14
-+#define NSS_CC_MAC2_SRDS1_CH1_RX_ARES 15
-+#define NSS_CC_MAC2_TX_ARES 16
-+#define NSS_CC_MAC2_GEPHY1_TX_ARES 17
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES 18
-+#define NSS_CC_MAC2_SRDS1_CH1_TX_ARES 19
-+#define NSS_CC_MAC2_RX_ARES 20
-+#define NSS_CC_MAC2_GEPHY1_RX_ARES 21
-+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES 22
-+#define NSS_CC_MAC3_SRDS1_CH2_RX_ARES 23
-+#define NSS_CC_MAC3_TX_ARES 24
-+#define NSS_CC_MAC3_GEPHY2_TX_ARES 25
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES 26
-+#define NSS_CC_MAC3_SRDS1_CH2_TX_ARES 27
-+#define NSS_CC_MAC3_RX_ARES 28
-+#define NSS_CC_MAC3_GEPHY2_RX_ARES 29
-+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES 30
-+#define NSS_CC_MAC4_SRDS1_CH3_RX_ARES 31
-+#define NSS_CC_MAC4_TX_ARES 32
-+#define NSS_CC_MAC4_GEPHY3_TX_ARES 33
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES 34
-+#define NSS_CC_MAC4_SRDS1_CH3_TX_ARES 35
-+#define NSS_CC_MAC4_RX_ARES 36
-+#define NSS_CC_MAC4_GEPHY3_RX_ARES 37
-+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES 38
-+#define NSS_CC_MAC5_TX_ARES 39
-+#define NSS_CC_MAC5_TX_SRDS0_ARES 40
-+#define NSS_CC_MAC5_RX_ARES 41
-+#define NSS_CC_MAC5_RX_SRDS0_ARES 42
-+#define NSS_CC_AHB_ARES 43
-+#define NSS_CC_SEC_CTRL_AHB_ARES 44
-+#define NSS_CC_TLMM_ARES 45
-+#define NSS_CC_TLMM_AHB_ARES 46
-+#define NSS_CC_CNOC_AHB_ARES 47
-+#define NSS_CC_MDIO_AHB_ARES 48
-+#define NSS_CC_MDIO_MASTER_AHB_ARES 49
-+#define NSS_CC_SRDS0_SYS_ARES 50
-+#define NSS_CC_SRDS1_SYS_ARES 51
-+#define NSS_CC_GEPHY0_SYS_ARES 52
-+#define NSS_CC_GEPHY1_SYS_ARES 53
-+#define NSS_CC_GEPHY2_SYS_ARES 54
-+#define NSS_CC_GEPHY3_SYS_ARES 55
-+#define NSS_CC_SEC_CTRL_ARES 56
-+#define NSS_CC_SEC_CTRL_SENSE_ARES 57
-+#define NSS_CC_SLEEP_ARES 58
-+#define NSS_CC_DEBUG_ARES 59
-+#define NSS_CC_GEPHY0_ARES 60
-+#define NSS_CC_GEPHY1_ARES 61
-+#define NSS_CC_GEPHY2_ARES 62
-+#define NSS_CC_GEPHY3_ARES 63
-+#define NSS_CC_DSP_ARES 64
-+#define NSS_CC_GEPHY_FULL_ARES 65
-+#define NSS_CC_GLOBAL_ARES 66
-+#define NSS_CC_XPCS_ARES 67
-+#endif
+++ /dev/null
-From 2441b965c4c7adae0b4a7825f7acb67d44c3cd38 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 5 Jun 2024 20:45:41 +0800
-Subject: [PATCH] clk: qcom: add clock controller driver for qca8386/qca8084
-
-The clock controller driver of qca8386/qca8084 is registered
-as the MDIO device, the hardware register is accessed by MDIO bus
-that is normally used to access general PHY device, which is
-different from the current existed qcom clock controller drivers
-using ioremap to access hardware clock registers, nsscc-qca8k is
-accessed via an MDIO bus.
-
-MDIO bus is commonly utilized by both qca8386/qca8084 and other
-PHY devices, so the mutex lock mdio_bus->mdio_lock should be
-used instead of using the mutex lock of remap.
-
-To access the hardware clock registers of qca8386/qca8084, there
-is a special MDIO frame sequence, which needs to be sent to the
-device.
-
-Enable the reference clock before resetting the clock controller,
-the reference clock rate is fixed to 50MHZ.
-
-Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Link: https://lore.kernel.org/r/20240605124541.2711467-5-quic_luoj@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/Kconfig | 9 +
- drivers/clk/qcom/Makefile | 1 +
- drivers/clk/qcom/nsscc-qca8k.c | 2221 ++++++++++++++++++++++++++++++++
- 3 files changed, 2231 insertions(+)
- create mode 100644 drivers/clk/qcom/nsscc-qca8k.c
-
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -204,6 +204,15 @@ config IPQ_GCC_9574
- i2c, USB, SD/eMMC, etc. Select this for the root clock
- of ipq9574.
-
-+config IPQ_NSSCC_QCA8K
-+ tristate "QCA8K(QCA8386 or QCA8084) NSS Clock Controller"
-+ depends on MDIO_BUS || COMPILE_TEST
-+ help
-+ Support for NSS(Network SubSystem) clock controller on
-+ qca8386/qca8084 chip.
-+ Say Y or M if you want to use network features of switch or
-+ PHY device. Select this for the root clock of qca8k.
-+
- config MSM_GCC_8660
- tristate "MSM8660 Global Clock Controller"
- depends on ARM || COMPILE_TEST
---- a/drivers/clk/qcom/Makefile
-+++ b/drivers/clk/qcom/Makefile
-@@ -31,6 +31,7 @@ obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806
- obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
- obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
- obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
-+obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
- obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
- obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
- obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
---- /dev/null
-+++ b/drivers/clk/qcom/nsscc-qca8k.c
-@@ -0,0 +1,2221 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#include <linux/clk-provider.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/regmap.h>
-+#include <linux/phy.h>
-+#include <linux/mdio.h>
-+#include <linux/clk.h>
-+#include <linux/gpio/consumer.h>
-+
-+#include <dt-bindings/clock/qcom,qca8k-nsscc.h>
-+#include <dt-bindings/reset/qcom,qca8k-nsscc.h>
-+
-+#include "clk-branch.h"
-+#include "clk-rcg.h"
-+#include "clk-regmap.h"
-+#include "clk-regmap-divider.h"
-+#include "clk-regmap-mux.h"
-+#include "common.h"
-+#include "reset.h"
-+
-+#define QCA8K_CLK_REG_BASE 0x800000
-+#define QCA8K_HIGH_ADDR_PREFIX 0x18
-+#define QCA8K_LOW_ADDR_PREFIX 0x10
-+#define QCA8K_CFG_PAGE_REG 0xc
-+#define QCA8K_CLK_REG_MASK GENMASK(4, 0)
-+#define QCA8K_CLK_PHY_ADDR_MASK GENMASK(7, 5)
-+#define QCA8K_CLK_PAGE_MASK GENMASK(23, 8)
-+#define QCA8K_REG_DATA_UPPER_16_BITS BIT(1)
-+
-+enum {
-+ DT_XO,
-+ DT_UNIPHY0_RX_CLK,
-+ DT_UNIPHY0_TX_CLK,
-+ DT_UNIPHY1_RX_CLK,
-+ DT_UNIPHY1_TX_CLK,
-+ DT_UNIPHY1_RX312P5M_CLK,
-+ DT_UNIPHY1_TX312P5M_CLK,
-+};
-+
-+enum {
-+ P_XO,
-+ P_UNIPHY0_RX,
-+ P_UNIPHY0_TX,
-+ P_UNIPHY1_RX,
-+ P_UNIPHY1_TX,
-+ P_UNIPHY1_RX312P5M,
-+ P_UNIPHY1_TX312P5M,
-+ P_MAC4_RX_DIV,
-+ P_MAC4_TX_DIV,
-+ P_MAC5_RX_DIV,
-+ P_MAC5_TX_DIV,
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy1_tx312p5m_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY1_TX312P5M_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_tx312p5m_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_TX312P5M, 1 },
-+};
-+
-+static struct clk_rcg2 nss_cc_switch_core_clk_src = {
-+ .cmd_rcgr = 0x0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_switch_core_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_data),
-+ .ops = &clk_rcg2_mux_closest_ops,
-+ },
-+};
-+
-+static struct clk_branch nss_cc_switch_core_clk = {
-+ .halt_reg = 0x8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_switch_core_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_switch_core_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_apb_bridge_clk = {
-+ .halt_reg = 0x10,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x10,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_apb_bridge_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_switch_core_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy1_tx_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY1_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_tx_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_TX, 2 },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac0_tx_clk_src = {
-+ .cmd_rcgr = 0x14,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_tx_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx_data),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_mux_closest_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac0_tx_div_clk_src = {
-+ .reg = 0x1c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac0_tx_clk = {
-+ .halt_reg = 0x20,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x20,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac0_tx_srds1_clk = {
-+ .halt_reg = 0x24,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x24,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_tx_srds1_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy1_rx_tx_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY1_RX_CLK },
-+ { .index = DT_UNIPHY1_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_rx_tx_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_RX, 1 },
-+ { P_UNIPHY1_TX, 2 },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac0_rx_clk_src = {
-+ .cmd_rcgr = 0x28,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_rx_tx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_rx_clk_src",
-+ .parent_data = nss_cc_uniphy1_rx_tx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx_data),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_mux_closest_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac0_rx_div_clk_src = {
-+ .reg = 0x30,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac0_rx_clk = {
-+ .halt_reg = 0x34,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x34,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac0_rx_srds1_clk = {
-+ .halt_reg = 0x3c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x3c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac0_rx_srds1_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac0_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy1_rx_tx312p5m_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY1_TX312P5M_CLK },
-+ { .index = DT_UNIPHY1_RX312P5M_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_rx_tx312p5m_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_TX312P5M, 6 },
-+ { P_UNIPHY1_RX312P5M, 7 },
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_25[] = {
-+ C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 12.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_125[] = {
-+ C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 2.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_312p5[] = {
-+ C(P_UNIPHY1_TX312P5M, 1, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_mac1_tx_clk_src[] = {
-+ FM(25000000, ftbl_nss_cc_mac1_tx_clk_src_25),
-+ FMS(50000000, P_XO, 1, 0, 0),
-+ FM(125000000, ftbl_nss_cc_mac1_tx_clk_src_125),
-+ FM(312500000, ftbl_nss_cc_mac1_tx_clk_src_312p5),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_mac1_tx_clk_src = {
-+ .cmd_rcgr = 0x40,
-+ .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_tx_clk_src",
-+ .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac1_tx_div_clk_src = {
-+ .reg = 0x48,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src = {
-+ .reg = 0x4c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_srds1_ch0_rx_clk = {
-+ .halt_reg = 0x50,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x50,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_tx_clk = {
-+ .halt_reg = 0x54,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x54,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_gephy0_tx_clk = {
-+ .halt_reg = 0x58,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x58,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_gephy0_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_srds1_ch0_xgmii_rx_clk = {
-+ .halt_reg = 0x5c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x5c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_xgmii_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy1_tx312p5m_prx_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY1_TX312P5M_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_tx312p5m_prx_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_TX312P5M, 6 },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_mac1_rx_clk_src[] = {
-+ F(25000000, P_UNIPHY1_TX312P5M, 12.5, 0, 0),
-+ F(50000000, P_XO, 1, 0, 0),
-+ F(125000000, P_UNIPHY1_TX312P5M, 2.5, 0, 0),
-+ F(312500000, P_UNIPHY1_TX312P5M, 1, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_mac1_rx_clk_src = {
-+ .cmd_rcgr = 0x60,
-+ .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_rx_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac1_rx_div_clk_src = {
-+ .reg = 0x68,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src = {
-+ .reg = 0x6c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_srds1_ch0_tx_clk = {
-+ .halt_reg = 0x70,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x70,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_rx_clk = {
-+ .halt_reg = 0x74,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x74,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_gephy0_rx_clk = {
-+ .halt_reg = 0x78,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x78,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_gephy0_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac1_srds1_ch0_xgmii_tx_clk = {
-+ .halt_reg = 0x7c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x7c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac1_srds1_ch0_xgmii_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac2_tx_clk_src = {
-+ .cmd_rcgr = 0x80,
-+ .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_tx_clk_src",
-+ .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac2_tx_div_clk_src = {
-+ .reg = 0x88,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src = {
-+ .reg = 0x8c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_srds1_ch1_rx_clk = {
-+ .halt_reg = 0x90,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x90,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_tx_clk = {
-+ .halt_reg = 0x94,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x94,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_gephy1_tx_clk = {
-+ .halt_reg = 0x98,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x98,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_gephy1_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_srds1_ch1_xgmii_rx_clk = {
-+ .halt_reg = 0x9c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x9c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_xgmii_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac2_rx_clk_src = {
-+ .cmd_rcgr = 0xa0,
-+ .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_rx_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac2_rx_div_clk_src = {
-+ .reg = 0xa8,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src = {
-+ .reg = 0xac,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_srds1_ch1_tx_clk = {
-+ .halt_reg = 0xb0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xb0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_rx_clk = {
-+ .halt_reg = 0xb4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xb4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_gephy1_rx_clk = {
-+ .halt_reg = 0xb8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xb8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_gephy1_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac2_srds1_ch1_xgmii_tx_clk = {
-+ .halt_reg = 0xbc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xbc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac2_srds1_ch1_xgmii_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac3_tx_clk_src = {
-+ .cmd_rcgr = 0xc0,
-+ .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_tx_clk_src",
-+ .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac3_tx_div_clk_src = {
-+ .reg = 0xc8,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src = {
-+ .reg = 0xcc,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_srds1_ch2_rx_clk = {
-+ .halt_reg = 0xd0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xd0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_tx_clk = {
-+ .halt_reg = 0xd4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xd4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_gephy2_tx_clk = {
-+ .halt_reg = 0xd8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xd8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_gephy2_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_srds1_ch2_xgmii_rx_clk = {
-+ .halt_reg = 0xdc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xdc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_xgmii_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac3_rx_clk_src = {
-+ .cmd_rcgr = 0xe0,
-+ .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_rx_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac3_rx_div_clk_src = {
-+ .reg = 0xe8,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src = {
-+ .reg = 0xec,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_srds1_ch2_tx_clk = {
-+ .halt_reg = 0xf0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xf0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_rx_clk = {
-+ .halt_reg = 0xf4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xf4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_gephy2_rx_clk = {
-+ .halt_reg = 0xf8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xf8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_gephy2_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac3_srds1_ch2_xgmii_tx_clk = {
-+ .halt_reg = 0xfc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xfc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac3_srds1_ch2_xgmii_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY0_RX_CLK },
-+ { .index = DT_UNIPHY1_TX312P5M_CLK },
-+ { .index = DT_UNIPHY1_RX312P5M_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY0_RX, 1 },
-+ { P_UNIPHY1_TX312P5M, 3 },
-+ { P_UNIPHY1_RX312P5M, 7 },
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_25[] = {
-+ C(P_UNIPHY0_RX, 12.5, 0, 0),
-+ C(P_UNIPHY0_RX, 5, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 12.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_125[] = {
-+ C(P_UNIPHY0_RX, 1, 0, 0),
-+ C(P_UNIPHY0_RX, 2.5, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 2.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_312p5[] = {
-+ C(P_UNIPHY0_RX, 1, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 1, 0, 0),
-+ C(P_UNIPHY1_RX312P5M, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_mac4_tx_clk_src[] = {
-+ FM(25000000, ftbl_nss_cc_mac4_tx_clk_src_25),
-+ FMS(50000000, P_XO, 1, 0, 0),
-+ FM(125000000, ftbl_nss_cc_mac4_tx_clk_src_125),
-+ FM(312500000, ftbl_nss_cc_mac4_tx_clk_src_312p5),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_mac4_tx_clk_src = {
-+ .cmd_rcgr = 0x100,
-+ .freq_multi_tbl = ftbl_nss_cc_mac4_tx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_tx_clk_src",
-+ .parent_data = nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac4_tx_div_clk_src = {
-+ .reg = 0x108,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src = {
-+ .reg = 0x10c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_srds1_ch3_rx_clk = {
-+ .halt_reg = 0x110,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x110,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_tx_clk = {
-+ .halt_reg = 0x114,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x114,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_gephy3_tx_clk = {
-+ .halt_reg = 0x118,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x118,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_gephy3_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_srds1_ch3_xgmii_rx_clk = {
-+ .halt_reg = 0x11c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x11c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_xgmii_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy0_tx_uniphy1_tx312p5m_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY0_TX_CLK },
-+ { .index = DT_UNIPHY1_TX312P5M_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy0_tx_uniphy1_tx312p5m_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY0_TX, 2 },
-+ { P_UNIPHY1_TX312P5M, 3 },
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_25[] = {
-+ C(P_UNIPHY0_TX, 12.5, 0, 0),
-+ C(P_UNIPHY0_TX, 5, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_125[] = {
-+ C(P_UNIPHY0_TX, 1, 0, 0),
-+ C(P_UNIPHY0_TX, 2.5, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_312p5[] = {
-+ C(P_UNIPHY0_TX, 1, 0, 0),
-+ C(P_UNIPHY1_TX312P5M, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_mac4_rx_clk_src[] = {
-+ FM(25000000, ftbl_nss_cc_mac4_rx_clk_src_25),
-+ FMS(50000000, P_XO, 1, 0, 0),
-+ FM(125000000, ftbl_nss_cc_mac4_rx_clk_src_125),
-+ FM(312500000, ftbl_nss_cc_mac4_rx_clk_src_312p5),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_mac4_rx_clk_src = {
-+ .cmd_rcgr = 0x120,
-+ .freq_multi_tbl = ftbl_nss_cc_mac4_rx_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy0_tx_uniphy1_tx312p5m_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_rx_clk_src",
-+ .parent_data = nss_cc_uniphy0_tx_uniphy1_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy0_tx_uniphy1_tx312p5m_data),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac4_rx_div_clk_src = {
-+ .reg = 0x128,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src = {
-+ .reg = 0x12c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_srds1_ch3_tx_clk = {
-+ .halt_reg = 0x130,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x130,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_rx_clk = {
-+ .halt_reg = 0x134,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x134,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_gephy3_rx_clk = {
-+ .halt_reg = 0x138,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x138,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_gephy3_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac4_srds1_ch3_xgmii_tx_clk = {
-+ .halt_reg = 0x13c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x13c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac4_srds1_ch3_xgmii_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy0_tx_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY0_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy0_tx_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY0_TX, 2 },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac5_tx_clk_src = {
-+ .cmd_rcgr = 0x140,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy0_tx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_tx_clk_src",
-+ .parent_data = nss_cc_uniphy0_tx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy0_tx_data),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_mux_closest_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac5_tx_div_clk_src = {
-+ .reg = 0x148,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_tx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac5_tx_clk = {
-+ .halt_reg = 0x14c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x14c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_tx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_uniphy0_rx_tx_data[] = {
-+ { .index = DT_XO },
-+ { .index = DT_UNIPHY0_RX_CLK },
-+ { .index = DT_UNIPHY0_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_uniphy0_rx_tx_map[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY0_RX, 1 },
-+ { P_UNIPHY0_TX, 2 },
-+};
-+
-+static struct clk_rcg2 nss_cc_mac5_rx_clk_src = {
-+ .cmd_rcgr = 0x154,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy0_rx_tx_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_rx_clk_src",
-+ .parent_data = nss_cc_uniphy0_rx_tx_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy0_rx_tx_data),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_mux_closest_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_mac5_rx_div_clk_src = {
-+ .reg = 0x15c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_rx_div_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac5_rx_clk = {
-+ .halt_reg = 0x160,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x160,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_rx_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct parent_map nss_cc_mac4_rx_div_mac5_tx_div_map[] = {
-+ { P_MAC4_RX_DIV, 0 },
-+ { P_MAC5_TX_DIV, 1 },
-+};
-+
-+static struct clk_regmap_mux nss_cc_mac5_tx_srds0_clk_src = {
-+ .reg = 0x300,
-+ .shift = 0,
-+ .width = 1,
-+ .parent_map = nss_cc_mac4_rx_div_mac5_tx_div_map,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_tx_srds0_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_rx_div_clk_src.clkr.hw,
-+ &nss_cc_mac5_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 2,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_mux_closest_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac5_tx_srds0_clk = {
-+ .halt_reg = 0x150,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x150,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_tx_srds0_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_tx_srds0_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct parent_map nss_cc_mac4_tx_div_mac5_rx_div_map[] = {
-+ { P_MAC4_TX_DIV, 0 },
-+ { P_MAC5_RX_DIV, 1 },
-+};
-+
-+static struct clk_regmap_mux nss_cc_mac5_rx_srds0_clk_src = {
-+ .reg = 0x300,
-+ .shift = 1,
-+ .width = 1,
-+ .parent_map = nss_cc_mac4_tx_div_mac5_rx_div_map,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_rx_srds0_clk_src",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac4_tx_div_clk_src.clkr.hw,
-+ &nss_cc_mac5_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 2,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_mux_closest_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mac5_rx_srds0_clk = {
-+ .halt_reg = 0x164,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x164,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mac5_rx_srds0_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_mac5_rx_srds0_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct parent_map nss_cc_uniphy1_tx312p5m_map2[] = {
-+ { P_XO, 0 },
-+ { P_UNIPHY1_TX312P5M, 2 },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_ahb_clk_src[] = {
-+ F(50000000, P_XO, 1, 0, 0),
-+ F(104170000, P_UNIPHY1_TX312P5M, 3, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_ahb_clk_src = {
-+ .cmd_rcgr = 0x168,
-+ .freq_tbl = ftbl_nss_cc_ahb_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_uniphy1_tx312p5m_map2,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ahb_clk_src",
-+ .parent_data = nss_cc_uniphy1_tx312p5m_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_data),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ahb_clk = {
-+ .halt_reg = 0x170,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x170,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_sec_ctrl_ahb_clk = {
-+ .halt_reg = 0x174,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x174,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_sec_ctrl_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_tlmm_clk = {
-+ .halt_reg = 0x178,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x178,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_tlmm_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_tlmm_ahb_clk = {
-+ .halt_reg = 0x190,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x190,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_tlmm_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_cnoc_ahb_clk = {
-+ .halt_reg = 0x194,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x194,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_cnoc_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mdio_ahb_clk = {
-+ .halt_reg = 0x198,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x198,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mdio_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_mdio_master_ahb_clk = {
-+ .halt_reg = 0x19c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x19c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_mdio_master_ahb_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_ahb_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static const struct clk_parent_data nss_cc_xo_data[] = {
-+ { .index = DT_XO },
-+};
-+
-+static const struct parent_map nss_cc_xo_map[] = {
-+ { P_XO, 0 },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_sys_clk_src[] = {
-+ F(25000000, P_XO, 2, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_sys_clk_src = {
-+ .cmd_rcgr = 0x1a0,
-+ .freq_tbl = ftbl_nss_cc_sys_clk_src,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_xo_map,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_sys_clk_src",
-+ .parent_data = nss_cc_xo_data,
-+ .num_parents = ARRAY_SIZE(nss_cc_xo_data),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_branch nss_cc_srds0_sys_clk = {
-+ .halt_reg = 0x1a8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1a8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_srds0_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_srds1_sys_clk = {
-+ .halt_reg = 0x1ac,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1ac,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_srds1_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_gephy0_sys_clk = {
-+ .halt_reg = 0x1b0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1b0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_gephy0_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_gephy1_sys_clk = {
-+ .halt_reg = 0x1b4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1b4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_gephy1_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_gephy2_sys_clk = {
-+ .halt_reg = 0x1b8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1b8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_gephy2_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_gephy3_sys_clk = {
-+ .halt_reg = 0x1bc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1bc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_gephy3_sys_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &nss_cc_sys_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_branch2_prepare_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap *nss_cc_qca8k_clocks[] = {
-+ [NSS_CC_SWITCH_CORE_CLK_SRC] = &nss_cc_switch_core_clk_src.clkr,
-+ [NSS_CC_SWITCH_CORE_CLK] = &nss_cc_switch_core_clk.clkr,
-+ [NSS_CC_APB_BRIDGE_CLK] = &nss_cc_apb_bridge_clk.clkr,
-+ [NSS_CC_MAC0_TX_CLK_SRC] = &nss_cc_mac0_tx_clk_src.clkr,
-+ [NSS_CC_MAC0_TX_DIV_CLK_SRC] = &nss_cc_mac0_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC0_TX_CLK] = &nss_cc_mac0_tx_clk.clkr,
-+ [NSS_CC_MAC0_TX_SRDS1_CLK] = &nss_cc_mac0_tx_srds1_clk.clkr,
-+ [NSS_CC_MAC0_RX_CLK_SRC] = &nss_cc_mac0_rx_clk_src.clkr,
-+ [NSS_CC_MAC0_RX_DIV_CLK_SRC] = &nss_cc_mac0_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC0_RX_CLK] = &nss_cc_mac0_rx_clk.clkr,
-+ [NSS_CC_MAC0_RX_SRDS1_CLK] = &nss_cc_mac0_rx_srds1_clk.clkr,
-+ [NSS_CC_MAC1_TX_CLK_SRC] = &nss_cc_mac1_tx_clk_src.clkr,
-+ [NSS_CC_MAC1_TX_DIV_CLK_SRC] = &nss_cc_mac1_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC] =
-+ &nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_RX_CLK] = &nss_cc_mac1_srds1_ch0_rx_clk.clkr,
-+ [NSS_CC_MAC1_TX_CLK] = &nss_cc_mac1_tx_clk.clkr,
-+ [NSS_CC_MAC1_GEPHY0_TX_CLK] = &nss_cc_mac1_gephy0_tx_clk.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK] = &nss_cc_mac1_srds1_ch0_xgmii_rx_clk.clkr,
-+ [NSS_CC_MAC1_RX_CLK_SRC] = &nss_cc_mac1_rx_clk_src.clkr,
-+ [NSS_CC_MAC1_RX_DIV_CLK_SRC] = &nss_cc_mac1_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC] =
-+ &nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_TX_CLK] = &nss_cc_mac1_srds1_ch0_tx_clk.clkr,
-+ [NSS_CC_MAC1_RX_CLK] = &nss_cc_mac1_rx_clk.clkr,
-+ [NSS_CC_MAC1_GEPHY0_RX_CLK] = &nss_cc_mac1_gephy0_rx_clk.clkr,
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK] = &nss_cc_mac1_srds1_ch0_xgmii_tx_clk.clkr,
-+ [NSS_CC_MAC2_TX_CLK_SRC] = &nss_cc_mac2_tx_clk_src.clkr,
-+ [NSS_CC_MAC2_TX_DIV_CLK_SRC] = &nss_cc_mac2_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC] =
-+ &nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_RX_CLK] = &nss_cc_mac2_srds1_ch1_rx_clk.clkr,
-+ [NSS_CC_MAC2_TX_CLK] = &nss_cc_mac2_tx_clk.clkr,
-+ [NSS_CC_MAC2_GEPHY1_TX_CLK] = &nss_cc_mac2_gephy1_tx_clk.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK] = &nss_cc_mac2_srds1_ch1_xgmii_rx_clk.clkr,
-+ [NSS_CC_MAC2_RX_CLK_SRC] = &nss_cc_mac2_rx_clk_src.clkr,
-+ [NSS_CC_MAC2_RX_DIV_CLK_SRC] = &nss_cc_mac2_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC] =
-+ &nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_TX_CLK] = &nss_cc_mac2_srds1_ch1_tx_clk.clkr,
-+ [NSS_CC_MAC2_RX_CLK] = &nss_cc_mac2_rx_clk.clkr,
-+ [NSS_CC_MAC2_GEPHY1_RX_CLK] = &nss_cc_mac2_gephy1_rx_clk.clkr,
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK] = &nss_cc_mac2_srds1_ch1_xgmii_tx_clk.clkr,
-+ [NSS_CC_MAC3_TX_CLK_SRC] = &nss_cc_mac3_tx_clk_src.clkr,
-+ [NSS_CC_MAC3_TX_DIV_CLK_SRC] = &nss_cc_mac3_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC] =
-+ &nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_RX_CLK] = &nss_cc_mac3_srds1_ch2_rx_clk.clkr,
-+ [NSS_CC_MAC3_TX_CLK] = &nss_cc_mac3_tx_clk.clkr,
-+ [NSS_CC_MAC3_GEPHY2_TX_CLK] = &nss_cc_mac3_gephy2_tx_clk.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK] = &nss_cc_mac3_srds1_ch2_xgmii_rx_clk.clkr,
-+ [NSS_CC_MAC3_RX_CLK_SRC] = &nss_cc_mac3_rx_clk_src.clkr,
-+ [NSS_CC_MAC3_RX_DIV_CLK_SRC] = &nss_cc_mac3_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC] =
-+ &nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_TX_CLK] = &nss_cc_mac3_srds1_ch2_tx_clk.clkr,
-+ [NSS_CC_MAC3_RX_CLK] = &nss_cc_mac3_rx_clk.clkr,
-+ [NSS_CC_MAC3_GEPHY2_RX_CLK] = &nss_cc_mac3_gephy2_rx_clk.clkr,
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK] = &nss_cc_mac3_srds1_ch2_xgmii_tx_clk.clkr,
-+ [NSS_CC_MAC4_TX_CLK_SRC] = &nss_cc_mac4_tx_clk_src.clkr,
-+ [NSS_CC_MAC4_TX_DIV_CLK_SRC] = &nss_cc_mac4_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC] =
-+ &nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_RX_CLK] = &nss_cc_mac4_srds1_ch3_rx_clk.clkr,
-+ [NSS_CC_MAC4_TX_CLK] = &nss_cc_mac4_tx_clk.clkr,
-+ [NSS_CC_MAC4_GEPHY3_TX_CLK] = &nss_cc_mac4_gephy3_tx_clk.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK] = &nss_cc_mac4_srds1_ch3_xgmii_rx_clk.clkr,
-+ [NSS_CC_MAC4_RX_CLK_SRC] = &nss_cc_mac4_rx_clk_src.clkr,
-+ [NSS_CC_MAC4_RX_DIV_CLK_SRC] = &nss_cc_mac4_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC] =
-+ &nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_TX_CLK] = &nss_cc_mac4_srds1_ch3_tx_clk.clkr,
-+ [NSS_CC_MAC4_RX_CLK] = &nss_cc_mac4_rx_clk.clkr,
-+ [NSS_CC_MAC4_GEPHY3_RX_CLK] = &nss_cc_mac4_gephy3_rx_clk.clkr,
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK] = &nss_cc_mac4_srds1_ch3_xgmii_tx_clk.clkr,
-+ [NSS_CC_MAC5_TX_CLK_SRC] = &nss_cc_mac5_tx_clk_src.clkr,
-+ [NSS_CC_MAC5_TX_DIV_CLK_SRC] = &nss_cc_mac5_tx_div_clk_src.clkr,
-+ [NSS_CC_MAC5_TX_SRDS0_CLK] = &nss_cc_mac5_tx_srds0_clk.clkr,
-+ [NSS_CC_MAC5_TX_CLK] = &nss_cc_mac5_tx_clk.clkr,
-+ [NSS_CC_MAC5_RX_CLK_SRC] = &nss_cc_mac5_rx_clk_src.clkr,
-+ [NSS_CC_MAC5_RX_DIV_CLK_SRC] = &nss_cc_mac5_rx_div_clk_src.clkr,
-+ [NSS_CC_MAC5_RX_SRDS0_CLK] = &nss_cc_mac5_rx_srds0_clk.clkr,
-+ [NSS_CC_MAC5_RX_CLK] = &nss_cc_mac5_rx_clk.clkr,
-+ [NSS_CC_MAC5_TX_SRDS0_CLK_SRC] = &nss_cc_mac5_tx_srds0_clk_src.clkr,
-+ [NSS_CC_MAC5_RX_SRDS0_CLK_SRC] = &nss_cc_mac5_rx_srds0_clk_src.clkr,
-+ [NSS_CC_AHB_CLK_SRC] = &nss_cc_ahb_clk_src.clkr,
-+ [NSS_CC_AHB_CLK] = &nss_cc_ahb_clk.clkr,
-+ [NSS_CC_SEC_CTRL_AHB_CLK] = &nss_cc_sec_ctrl_ahb_clk.clkr,
-+ [NSS_CC_TLMM_CLK] = &nss_cc_tlmm_clk.clkr,
-+ [NSS_CC_TLMM_AHB_CLK] = &nss_cc_tlmm_ahb_clk.clkr,
-+ [NSS_CC_CNOC_AHB_CLK] = &nss_cc_cnoc_ahb_clk.clkr,
-+ [NSS_CC_MDIO_AHB_CLK] = &nss_cc_mdio_ahb_clk.clkr,
-+ [NSS_CC_MDIO_MASTER_AHB_CLK] = &nss_cc_mdio_master_ahb_clk.clkr,
-+ [NSS_CC_SYS_CLK_SRC] = &nss_cc_sys_clk_src.clkr,
-+ [NSS_CC_SRDS0_SYS_CLK] = &nss_cc_srds0_sys_clk.clkr,
-+ [NSS_CC_SRDS1_SYS_CLK] = &nss_cc_srds1_sys_clk.clkr,
-+ [NSS_CC_GEPHY0_SYS_CLK] = &nss_cc_gephy0_sys_clk.clkr,
-+ [NSS_CC_GEPHY1_SYS_CLK] = &nss_cc_gephy1_sys_clk.clkr,
-+ [NSS_CC_GEPHY2_SYS_CLK] = &nss_cc_gephy2_sys_clk.clkr,
-+ [NSS_CC_GEPHY3_SYS_CLK] = &nss_cc_gephy3_sys_clk.clkr,
-+};
-+
-+static const struct qcom_reset_map nss_cc_qca8k_resets[] = {
-+ [NSS_CC_SWITCH_CORE_ARES] = { 0xc, 2 },
-+ [NSS_CC_APB_BRIDGE_ARES] = { 0x10, 2 },
-+ [NSS_CC_MAC0_TX_ARES] = { 0x20, 2 },
-+ [NSS_CC_MAC0_TX_SRDS1_ARES] = { 0x24, 2 },
-+ [NSS_CC_MAC0_RX_ARES] = { 0x34, 2 },
-+ [NSS_CC_MAC0_RX_SRDS1_ARES] = { 0x3c, 2 },
-+ [NSS_CC_MAC1_SRDS1_CH0_RX_ARES] = { 0x50, 2 },
-+ [NSS_CC_MAC1_TX_ARES] = { 0x54, 2 },
-+ [NSS_CC_MAC1_GEPHY0_TX_ARES] = { 0x58, 2 },
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES] = { 0x5c, 2 },
-+ [NSS_CC_MAC1_SRDS1_CH0_TX_ARES] = { 0x70, 2 },
-+ [NSS_CC_MAC1_RX_ARES] = { 0x74, 2 },
-+ [NSS_CC_MAC1_GEPHY0_RX_ARES] = { 0x78, 2 },
-+ [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES] = { 0x7c, 2 },
-+ [NSS_CC_MAC2_SRDS1_CH1_RX_ARES] = { 0x90, 2 },
-+ [NSS_CC_MAC2_TX_ARES] = { 0x94, 2 },
-+ [NSS_CC_MAC2_GEPHY1_TX_ARES] = { 0x98, 2 },
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES] = { 0x9c, 2 },
-+ [NSS_CC_MAC2_SRDS1_CH1_TX_ARES] = { 0xb0, 2 },
-+ [NSS_CC_MAC2_RX_ARES] = { 0xb4, 2 },
-+ [NSS_CC_MAC2_GEPHY1_RX_ARES] = { 0xb8, 2 },
-+ [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES] = { 0xbc, 2 },
-+ [NSS_CC_MAC3_SRDS1_CH2_RX_ARES] = { 0xd0, 2 },
-+ [NSS_CC_MAC3_TX_ARES] = { 0xd4, 2 },
-+ [NSS_CC_MAC3_GEPHY2_TX_ARES] = { 0xd8, 2 },
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES] = { 0xdc, 2 },
-+ [NSS_CC_MAC3_SRDS1_CH2_TX_ARES] = { 0xf0, 2 },
-+ [NSS_CC_MAC3_RX_ARES] = { 0xf4, 2 },
-+ [NSS_CC_MAC3_GEPHY2_RX_ARES] = { 0xf8, 2 },
-+ [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES] = { 0xfc, 2 },
-+ [NSS_CC_MAC4_SRDS1_CH3_RX_ARES] = { 0x110, 2 },
-+ [NSS_CC_MAC4_TX_ARES] = { 0x114, 2 },
-+ [NSS_CC_MAC4_GEPHY3_TX_ARES] = { 0x118, 2 },
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES] = { 0x11c, 2 },
-+ [NSS_CC_MAC4_SRDS1_CH3_TX_ARES] = { 0x130, 2 },
-+ [NSS_CC_MAC4_RX_ARES] = { 0x134, 2 },
-+ [NSS_CC_MAC4_GEPHY3_RX_ARES] = { 0x138, 2 },
-+ [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES] = { 0x13c, 2 },
-+ [NSS_CC_MAC5_TX_ARES] = { 0x14c, 2 },
-+ [NSS_CC_MAC5_TX_SRDS0_ARES] = { 0x150, 2 },
-+ [NSS_CC_MAC5_RX_ARES] = { 0x160, 2 },
-+ [NSS_CC_MAC5_RX_SRDS0_ARES] = { 0x164, 2 },
-+ [NSS_CC_AHB_ARES] = { 0x170, 2 },
-+ [NSS_CC_SEC_CTRL_AHB_ARES] = { 0x174, 2 },
-+ [NSS_CC_TLMM_ARES] = { 0x178, 2 },
-+ [NSS_CC_TLMM_AHB_ARES] = { 0x190, 2 },
-+ [NSS_CC_CNOC_AHB_ARES] = { 0x194, 2 }, /* reset CNOC AHB & APB */
-+ [NSS_CC_MDIO_AHB_ARES] = { 0x198, 2 },
-+ [NSS_CC_MDIO_MASTER_AHB_ARES] = { 0x19c, 2 },
-+ [NSS_CC_SRDS0_SYS_ARES] = { 0x1a8, 2 },
-+ [NSS_CC_SRDS1_SYS_ARES] = { 0x1ac, 2 },
-+ [NSS_CC_GEPHY0_SYS_ARES] = { 0x1b0, 2 },
-+ [NSS_CC_GEPHY1_SYS_ARES] = { 0x1b4, 2 },
-+ [NSS_CC_GEPHY2_SYS_ARES] = { 0x1b8, 2 },
-+ [NSS_CC_GEPHY3_SYS_ARES] = { 0x1bc, 2 },
-+ [NSS_CC_SEC_CTRL_ARES] = { 0x1c8, 2 },
-+ [NSS_CC_SEC_CTRL_SENSE_ARES] = { 0x1d0, 2 },
-+ [NSS_CC_SLEEP_ARES] = { 0x1e0, 2 },
-+ [NSS_CC_DEBUG_ARES] = { 0x1e8, 2 },
-+ [NSS_CC_GEPHY0_ARES] = { 0x304, 0 },
-+ [NSS_CC_GEPHY1_ARES] = { 0x304, 1 },
-+ [NSS_CC_GEPHY2_ARES] = { 0x304, 2 },
-+ [NSS_CC_GEPHY3_ARES] = { 0x304, 3 },
-+ [NSS_CC_DSP_ARES] = { 0x304, 4 },
-+ [NSS_CC_GEPHY_FULL_ARES] = { .reg = 0x304, .bitmask = GENMASK(4, 0) },
-+ [NSS_CC_GLOBAL_ARES] = { 0x308, 0 },
-+ [NSS_CC_XPCS_ARES] = { 0x30c, 0 },
-+};
-+
-+/* For each read/write operation of clock register, there are three MDIO frames
-+ * sent to the device.
-+ *
-+ * 1. The high address part[23:8] of register is packaged into the first MDIO frame
-+ * for selecting page.
-+ * 2. The low address part[7:0] of register is packaged into the second MDIO frame
-+ * with the low 16bit data to read/write.
-+ * 3. The low address part[7:0] of register is packaged into the last MDIO frame
-+ * with the high 16bit data to read/write.
-+ *
-+ * The clause22 MDIO frame format used by device is as below.
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | ST| OP| ADDR | REG | TA| DATA |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ */
-+static inline void convert_reg_to_mii_addr(u32 regaddr, u16 *reg, u16 *phy_addr, u16 *page)
-+{
-+ *reg = FIELD_GET(QCA8K_CLK_REG_MASK, regaddr);
-+ *phy_addr = FIELD_GET(QCA8K_CLK_PHY_ADDR_MASK, regaddr) | QCA8K_LOW_ADDR_PREFIX;
-+ *page = FIELD_GET(QCA8K_CLK_PAGE_MASK, regaddr);
-+}
-+
-+static int qca8k_mii_read(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u32 *val)
-+{
-+ int ret, data;
-+
-+ ret = __mdiobus_read(bus, switch_phy_id, reg);
-+ if (ret >= 0) {
-+ data = ret;
-+
-+ ret = __mdiobus_read(bus, switch_phy_id, (reg | QCA8K_REG_DATA_UPPER_16_BITS));
-+ if (ret >= 0)
-+ *val = data | ret << 16;
-+ }
-+
-+ if (ret < 0)
-+ dev_err_ratelimited(&bus->dev, "fail to read qca8k mii register\n");
-+
-+ return ret < 0 ? ret : 0;
-+}
-+
-+static void qca8k_mii_write(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u32 val)
-+{
-+ int ret;
-+
-+ ret = __mdiobus_write(bus, switch_phy_id, reg, lower_16_bits(val));
-+ if (ret >= 0)
-+ ret = __mdiobus_write(bus, switch_phy_id, (reg | QCA8K_REG_DATA_UPPER_16_BITS),
-+ upper_16_bits(val));
-+
-+ if (ret < 0)
-+ dev_err_ratelimited(&bus->dev, "fail to write qca8k mii register\n");
-+}
-+
-+static int qca8k_mii_page_set(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u16 page)
-+{
-+ int ret;
-+
-+ ret = __mdiobus_write(bus, switch_phy_id, reg, page);
-+ if (ret < 0)
-+ dev_err_ratelimited(&bus->dev, "fail to set page\n");
-+
-+ return ret;
-+}
-+
-+static int qca8k_regmap_read(void *context, unsigned int regaddr, unsigned int *val)
-+{
-+ struct mii_bus *bus = context;
-+ u16 reg, phy_addr, page;
-+ int ret;
-+
-+ regaddr += QCA8K_CLK_REG_BASE;
-+ convert_reg_to_mii_addr(regaddr, ®, &phy_addr, &page);
-+
-+ mutex_lock(&bus->mdio_lock);
-+ ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
-+ if (ret < 0)
-+ goto qca8k_read_exit;
-+
-+ ret = qca8k_mii_read(bus, phy_addr, reg, val);
-+
-+qca8k_read_exit:
-+ mutex_unlock(&bus->mdio_lock);
-+ return ret;
-+};
-+
-+static int qca8k_regmap_write(void *context, unsigned int regaddr, unsigned int val)
-+{
-+ struct mii_bus *bus = context;
-+ u16 reg, phy_addr, page;
-+ int ret;
-+
-+ regaddr += QCA8K_CLK_REG_BASE;
-+ convert_reg_to_mii_addr(regaddr, ®, &phy_addr, &page);
-+
-+ mutex_lock(&bus->mdio_lock);
-+ ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
-+ if (ret < 0)
-+ goto qca8k_write_exit;
-+
-+ qca8k_mii_write(bus, phy_addr, reg, val);
-+
-+qca8k_write_exit:
-+ mutex_unlock(&bus->mdio_lock);
-+ return ret;
-+};
-+
-+static int qca8k_regmap_update_bits(void *context, unsigned int regaddr,
-+ unsigned int mask, unsigned int value)
-+{
-+ struct mii_bus *bus = context;
-+ u16 reg, phy_addr, page;
-+ int ret;
-+ u32 val;
-+
-+ regaddr += QCA8K_CLK_REG_BASE;
-+ convert_reg_to_mii_addr(regaddr, ®, &phy_addr, &page);
-+
-+ mutex_lock(&bus->mdio_lock);
-+ ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
-+ if (ret < 0)
-+ goto qca8k_update_exit;
-+
-+ ret = qca8k_mii_read(bus, phy_addr, reg, &val);
-+ if (ret < 0)
-+ goto qca8k_update_exit;
-+
-+ val &= ~mask;
-+ val |= value;
-+ qca8k_mii_write(bus, phy_addr, reg, val);
-+
-+qca8k_update_exit:
-+ mutex_unlock(&bus->mdio_lock);
-+ return ret;
-+}
-+
-+static const struct regmap_config nss_cc_qca8k_regmap_config = {
-+ .reg_bits = 12,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .max_register = 0x30c,
-+ .reg_read = qca8k_regmap_read,
-+ .reg_write = qca8k_regmap_write,
-+ .reg_update_bits = qca8k_regmap_update_bits,
-+ .disable_locking = true,
-+};
-+
-+static const struct qcom_cc_desc nss_cc_qca8k_desc = {
-+ .config = &nss_cc_qca8k_regmap_config,
-+ .clks = nss_cc_qca8k_clocks,
-+ .num_clks = ARRAY_SIZE(nss_cc_qca8k_clocks),
-+ .resets = nss_cc_qca8k_resets,
-+ .num_resets = ARRAY_SIZE(nss_cc_qca8k_resets),
-+};
-+
-+/*
-+ * The reference clock of QCA8k NSSCC needs to be enabled to make sure
-+ * the GPIO reset taking effect.
-+ */
-+static int nss_cc_qca8k_clock_enable_and_reset(struct device *dev)
-+{
-+ struct gpio_desc *gpiod;
-+ struct clk *clk;
-+
-+ clk = devm_clk_get_enabled(dev, NULL);
-+ if (IS_ERR(clk))
-+ return PTR_ERR(clk);
-+
-+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
-+ if (IS_ERR(gpiod)) {
-+ return PTR_ERR(gpiod);
-+ } else if (gpiod) {
-+ msleep(100);
-+ gpiod_set_value_cansleep(gpiod, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+static int nss_cc_qca8k_probe(struct mdio_device *mdiodev)
-+{
-+ struct regmap *regmap;
-+ int ret;
-+
-+ ret = nss_cc_qca8k_clock_enable_and_reset(&mdiodev->dev);
-+ if (ret)
-+ return dev_err_probe(&mdiodev->dev, ret, "Fail to reset NSSCC\n");
-+
-+ regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev->bus, nss_cc_qca8k_desc.config);
-+ if (IS_ERR(regmap))
-+ return dev_err_probe(&mdiodev->dev, PTR_ERR(regmap), "Failed to init regmap\n");
-+
-+ return qcom_cc_really_probe(&mdiodev->dev, &nss_cc_qca8k_desc, regmap);
-+}
-+
-+static const struct of_device_id nss_cc_qca8k_match_table[] = {
-+ { .compatible = "qcom,qca8084-nsscc" },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, nss_cc_qca8k_match_table);
-+
-+static struct mdio_driver nss_cc_qca8k_driver = {
-+ .mdiodrv.driver = {
-+ .name = "qcom,qca8k-nsscc",
-+ .of_match_table = nss_cc_qca8k_match_table,
-+ },
-+ .probe = nss_cc_qca8k_probe,
-+};
-+
-+mdio_module_driver(nss_cc_qca8k_driver);
-+
-+MODULE_DESCRIPTION("QCOM NSS_CC QCA8K Driver");
-+MODULE_LICENSE("GPL");
+++ /dev/null
-From e60ac570137b42ef61a01a6b26133a8e2d7e8d4b Mon Sep 17 00:00:00 2001
-From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Date: Mon, 6 May 2024 21:47:58 -0500
-Subject: [PATCH] arm64: dts: qcom: ipq9574: add MDIO bus
-
-The IPQ95xx uses an IPQ4019 compatible MDIO controller that is already
-supported. Add a DT node to expose it.
-
-Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Link: https://lore.kernel.org/r/20240507024758.2810514-2-mr.nuke.me@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -218,6 +218,16 @@
- clock-names = "core";
- };
-
-+ mdio: mdio@90000 {
-+ compatible = "qcom,ipq9574-mdio", "qcom,ipq4019-mdio";
-+ reg = <0x00090000 0x64>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ clocks = <&gcc GCC_MDIO_AHB_CLK>;
-+ clock-names = "gcc_mdio_ahb_clk";
-+ status = "disabled";
-+ };
-+
- qfprom: efuse@a4000 {
- compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
- reg = <0x000a4000 0x5a1>;
+++ /dev/null
-From d06b1043644a1831ab141bbee2669002bba15b0f Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 20 Dec 2023 23:17:22 +0100
-Subject: [PATCH 1/2] clk: qcom: clk-rcg: introduce support for multiple conf
- for same freq
-
-Some RCG frequency can be reached by multiple configuration.
-
-We currently declare multiple configuration for the same frequency but
-that is not supported and always the first configuration will be taken.
-
-These multiple configuration are needed as based on the current parent
-configuration, it may be needed to use a different configuration to
-reach the same frequency.
-
-To handle this introduce 3 new macro, C, FM and FMS:
-
-- C is used to declare a freq_conf where src, pre_div, m and n are
- provided.
-
-- FM is used to declare a freq_multi_tbl with the frequency and an
- array of confs to insert all the config for the provided frequency.
-
-- FMS is used to declare a freq_multi_tbl with the frequency and an
- array of a single conf with the provided src, pre_div, m and n.
-
-Struct clk_rcg2 is changed to add a union type to reference a simple
-freq_tbl or a complex freq_multi_tbl.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Stephen Boyd <sboyd@kernel.org>
-Link: https://lore.kernel.org/r/20231220221724.3822-2-ansuelsmth@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/clk-rcg.h | 23 ++++++++++++++++++++++-
- 1 file changed, 22 insertions(+), 1 deletion(-)
-
---- a/drivers/clk/qcom/clk-rcg.h
-+++ b/drivers/clk/qcom/clk-rcg.h
-@@ -17,6 +17,23 @@ struct freq_tbl {
- u16 n;
- };
-
-+#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) }
-+#define FM(f, confs) { (f), ARRAY_SIZE(confs), (confs) }
-+#define FMS(f, s, h, m, n) { (f), 1, (const struct freq_conf []){ C(s, h, m, n) } }
-+
-+struct freq_conf {
-+ u8 src;
-+ u8 pre_div;
-+ u16 m;
-+ u16 n;
-+};
-+
-+struct freq_multi_tbl {
-+ unsigned long freq;
-+ size_t num_confs;
-+ const struct freq_conf *confs;
-+};
-+
- /**
- * struct mn - M/N:D counter
- * @mnctr_en_bit: bit to enable mn counter
-@@ -138,6 +155,7 @@ extern const struct clk_ops clk_dyn_rcg_
- * @safe_src_index: safe src index value
- * @parent_map: map from software's parent index to hardware's src_sel field
- * @freq_tbl: frequency table
-+ * @freq_multi_tbl: frequency table for clocks reachable with multiple RCGs conf
- * @clkr: regmap clock handle
- * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
- * @parked_cfg: cached value of the CFG register for parked RCGs
-@@ -149,7 +167,10 @@ struct clk_rcg2 {
- u8 hid_width;
- u8 safe_src_index;
- const struct parent_map *parent_map;
-- const struct freq_tbl *freq_tbl;
-+ union {
-+ const struct freq_tbl *freq_tbl;
-+ const struct freq_multi_tbl *freq_multi_tbl;
-+ };
- struct clk_regmap clkr;
- u8 cfg_off;
- u32 parked_cfg;
+++ /dev/null
-From 89da22456af0762477d8c1345fdd17961b3ada80 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 20 Dec 2023 23:17:23 +0100
-Subject: [PATCH 2/2] clk: qcom: clk-rcg2: add support for rcg2 freq multi ops
-
-Some RCG frequency can be reached by multiple configuration.
-
-Add clk_rcg2_fm_ops ops to support these special RCG configurations.
-
-These alternative ops will select the frequency using a CEIL policy.
-
-When the correct frequency is found, the correct config is selected by
-calculating the final rate (by checking the defined parent and values
-in the config that is being checked) and deciding based on the one that
-is less different than the requested one.
-
-These check are skipped if there is just one config for the requested
-freq.
-
-qcom_find_freq_multi is added to search the freq with the new struct
-freq_multi_tbl.
-__clk_rcg2_select_conf is used to select the correct conf by simulating
-the final clock.
-If a conf can't be found due to parent not reachable, a WARN is printed
-and -EINVAL is returned.
-
-Tested-by: Wei Lei <quic_leiwei@quicinc.com>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Acked-by: Stephen Boyd <sboyd@kernel.org>
-Link: https://lore.kernel.org/r/20231220221724.3822-3-ansuelsmth@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/clk-rcg.h | 1 +
- drivers/clk/qcom/clk-rcg2.c | 166 ++++++++++++++++++++++++++++++++++++
- drivers/clk/qcom/common.c | 18 ++++
- drivers/clk/qcom/common.h | 2 +
- 4 files changed, 187 insertions(+)
-
---- a/drivers/clk/qcom/clk-rcg.h
-+++ b/drivers/clk/qcom/clk-rcg.h
-@@ -190,6 +190,7 @@ struct clk_rcg2_gfx3d {
-
- extern const struct clk_ops clk_rcg2_ops;
- extern const struct clk_ops clk_rcg2_floor_ops;
-+extern const struct clk_ops clk_rcg2_fm_ops;
- extern const struct clk_ops clk_rcg2_mux_closest_ops;
- extern const struct clk_ops clk_edp_pixel_ops;
- extern const struct clk_ops clk_byte_ops;
---- a/drivers/clk/qcom/clk-rcg2.c
-+++ b/drivers/clk/qcom/clk-rcg2.c
-@@ -260,6 +260,115 @@ static int _freq_tbl_determine_rate(stru
- return 0;
- }
-
-+static const struct freq_conf *
-+__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
-+ unsigned long req_rate)
-+{
-+ unsigned long rate_diff, best_rate_diff = ULONG_MAX;
-+ const struct freq_conf *conf, *best_conf = NULL;
-+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-+ const char *name = clk_hw_get_name(hw);
-+ unsigned long parent_rate, rate;
-+ struct clk_hw *p;
-+ int index, i;
-+
-+ /* Exit early if only one config is defined */
-+ if (f->num_confs == 1) {
-+ best_conf = f->confs;
-+ goto exit;
-+ }
-+
-+ /* Search in each provided config the one that is near the wanted rate */
-+ for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
-+ index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
-+ if (index < 0)
-+ continue;
-+
-+ p = clk_hw_get_parent_by_index(hw, index);
-+ if (!p)
-+ continue;
-+
-+ parent_rate = clk_hw_get_rate(p);
-+ rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
-+
-+ if (rate == req_rate) {
-+ best_conf = conf;
-+ goto exit;
-+ }
-+
-+ rate_diff = abs_diff(req_rate, rate);
-+ if (rate_diff < best_rate_diff) {
-+ best_rate_diff = rate_diff;
-+ best_conf = conf;
-+ }
-+ }
-+
-+ /*
-+ * Very unlikely. Warn if we couldn't find a correct config
-+ * due to parent not found in every config.
-+ */
-+ if (unlikely(!best_conf)) {
-+ WARN(1, "%s: can't find a configuration for rate %lu\n",
-+ name, req_rate);
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+exit:
-+ return best_conf;
-+}
-+
-+static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
-+ struct clk_rate_request *req)
-+{
-+ unsigned long clk_flags, rate = req->rate;
-+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-+ const struct freq_conf *conf;
-+ struct clk_hw *p;
-+ int index;
-+
-+ f = qcom_find_freq_multi(f, rate);
-+ if (!f || !f->confs)
-+ return -EINVAL;
-+
-+ conf = __clk_rcg2_select_conf(hw, f, rate);
-+ if (IS_ERR(conf))
-+ return PTR_ERR(conf);
-+ index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
-+ if (index < 0)
-+ return index;
-+
-+ clk_flags = clk_hw_get_flags(hw);
-+ p = clk_hw_get_parent_by_index(hw, index);
-+ if (!p)
-+ return -EINVAL;
-+
-+ if (clk_flags & CLK_SET_RATE_PARENT) {
-+ rate = f->freq;
-+ if (conf->pre_div) {
-+ if (!rate)
-+ rate = req->rate;
-+ rate /= 2;
-+ rate *= conf->pre_div + 1;
-+ }
-+
-+ if (conf->n) {
-+ u64 tmp = rate;
-+
-+ tmp = tmp * conf->n;
-+ do_div(tmp, conf->m);
-+ rate = tmp;
-+ }
-+ } else {
-+ rate = clk_hw_get_rate(p);
-+ }
-+
-+ req->best_parent_hw = p;
-+ req->best_parent_rate = rate;
-+ req->rate = f->freq;
-+
-+ return 0;
-+}
-+
- static int clk_rcg2_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
- {
-@@ -276,6 +385,14 @@ static int clk_rcg2_determine_floor_rate
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
- }
-
-+static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
-+ struct clk_rate_request *req)
-+{
-+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-+
-+ return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
-+}
-+
- static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
- u32 *_cfg)
- {
-@@ -371,6 +488,30 @@ static int __clk_rcg2_set_rate(struct cl
- return clk_rcg2_configure(rcg, f);
- }
-
-+static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
-+{
-+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-+ const struct freq_multi_tbl *f;
-+ const struct freq_conf *conf;
-+ struct freq_tbl f_tbl = {};
-+
-+ f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
-+ if (!f || !f->confs)
-+ return -EINVAL;
-+
-+ conf = __clk_rcg2_select_conf(hw, f, rate);
-+ if (IS_ERR(conf))
-+ return PTR_ERR(conf);
-+
-+ f_tbl.freq = f->freq;
-+ f_tbl.src = conf->src;
-+ f_tbl.pre_div = conf->pre_div;
-+ f_tbl.m = conf->m;
-+ f_tbl.n = conf->n;
-+
-+ return clk_rcg2_configure(rcg, &f_tbl);
-+}
-+
- static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
- {
-@@ -383,6 +524,12 @@ static int clk_rcg2_set_floor_rate(struc
- return __clk_rcg2_set_rate(hw, rate, FLOOR);
- }
-
-+static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
-+ unsigned long parent_rate)
-+{
-+ return __clk_rcg2_fm_set_rate(hw, rate);
-+}
-+
- static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
- unsigned long rate, unsigned long parent_rate, u8 index)
- {
-@@ -395,6 +542,12 @@ static int clk_rcg2_set_floor_rate_and_p
- return __clk_rcg2_set_rate(hw, rate, FLOOR);
- }
-
-+static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
-+ unsigned long rate, unsigned long parent_rate, u8 index)
-+{
-+ return __clk_rcg2_fm_set_rate(hw, rate);
-+}
-+
- static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
- {
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-@@ -505,6 +658,19 @@ const struct clk_ops clk_rcg2_floor_ops
- };
- EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
-
-+const struct clk_ops clk_rcg2_fm_ops = {
-+ .is_enabled = clk_rcg2_is_enabled,
-+ .get_parent = clk_rcg2_get_parent,
-+ .set_parent = clk_rcg2_set_parent,
-+ .recalc_rate = clk_rcg2_recalc_rate,
-+ .determine_rate = clk_rcg2_fm_determine_rate,
-+ .set_rate = clk_rcg2_fm_set_rate,
-+ .set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
-+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
-+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
-+};
-+EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
-+
- const struct clk_ops clk_rcg2_mux_closest_ops = {
- .determine_rate = __clk_mux_determine_rate_closest,
- .get_parent = clk_rcg2_get_parent,
---- a/drivers/clk/qcom/common.c
-+++ b/drivers/clk/qcom/common.c
-@@ -41,6 +41,24 @@ struct freq_tbl *qcom_find_freq(const st
- }
- EXPORT_SYMBOL_GPL(qcom_find_freq);
-
-+const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
-+ unsigned long rate)
-+{
-+ if (!f)
-+ return NULL;
-+
-+ if (!f->freq)
-+ return f;
-+
-+ for (; f->freq; f++)
-+ if (rate <= f->freq)
-+ return f;
-+
-+ /* Default to our fastest rate */
-+ return f - 1;
-+}
-+EXPORT_SYMBOL_GPL(qcom_find_freq_multi);
-+
- const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
- unsigned long rate)
- {
---- a/drivers/clk/qcom/common.h
-+++ b/drivers/clk/qcom/common.h
-@@ -45,6 +45,8 @@ extern const struct freq_tbl *qcom_find_
- unsigned long rate);
- extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
- unsigned long rate);
-+extern const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
-+ unsigned long rate);
- extern void
- qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
- extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
+++ /dev/null
-From 7311bbfff31c4961c57d94c165fa843f155f8236 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 5 Jun 2024 20:45:38 +0800
-Subject: [PATCH] clk: qcom: branch: Add clk_branch2_prepare_ops
-
-Add the clk_branch2_prepare_ops for supporting clock controller
-where the hardware register is accessed by MDIO bus, and the
-spin lock can't be used because of sleep during the MDIO
-operation.
-
-The clock is enabled by the .prepare instead of .enable when
-the clk_branch2_prepare_ops is used.
-
-Acked-by: Stephen Boyd <sboyd@kernel.org>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Link: https://lore.kernel.org/r/20240605124541.2711467-2-quic_luoj@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/clk-branch.c | 7 +++++++
- drivers/clk/qcom/clk-branch.h | 1 +
- 2 files changed, 8 insertions(+)
-
---- a/drivers/clk/qcom/clk-branch.c
-+++ b/drivers/clk/qcom/clk-branch.c
-@@ -153,3 +153,10 @@ const struct clk_ops clk_branch_simple_o
- .is_enabled = clk_is_enabled_regmap,
- };
- EXPORT_SYMBOL_GPL(clk_branch_simple_ops);
-+
-+const struct clk_ops clk_branch2_prepare_ops = {
-+ .prepare = clk_branch2_enable,
-+ .unprepare = clk_branch2_disable,
-+ .is_prepared = clk_is_enabled_regmap,
-+};
-+EXPORT_SYMBOL_GPL(clk_branch2_prepare_ops);
---- a/drivers/clk/qcom/clk-branch.h
-+++ b/drivers/clk/qcom/clk-branch.h
-@@ -85,6 +85,7 @@ extern const struct clk_ops clk_branch_o
- extern const struct clk_ops clk_branch2_ops;
- extern const struct clk_ops clk_branch_simple_ops;
- extern const struct clk_ops clk_branch2_aon_ops;
-+extern const struct clk_ops clk_branch2_prepare_ops;
-
- #define to_clk_branch(_hw) \
- container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
+++ /dev/null
-From 9f93a0a428606341da25bf2a00244701b58e08b9 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 5 Jun 2024 20:45:40 +0800
-Subject: [PATCH] clk: qcom: common: commonize qcom_cc_really_probe
-
-The previous wrapper qcom_cc_really_probe takes the platform
-device as parameter, which is limited to platform driver.
-
-As for qca8k clock controller driver, which is registered as
-the MDIO device, which also follows the qcom clock framework.
-
-To commonize qcom_cc_really_probe, updating it to take the
-struct device as parameter, so that the qcom_cc_really_probe
-can be utilized by the previous platform device and the new
-added MDIO device.
-
-Also update the current clock controller drivers to take
-&pdev->dev as parameter when calling qcom_cc_really_probe.
-
-Reviewed-by: Stephen Boyd <sboyd@kernel.org>
-Tested-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
-Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Link: https://lore.kernel.org/r/20240605124541.2711467-4-quic_luoj@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/apss-ipq6018.c | 2 +-
- drivers/clk/qcom/camcc-sc7180.c | 2 +-
- drivers/clk/qcom/camcc-sc7280.c | 2 +-
- drivers/clk/qcom/camcc-sc8280xp.c | 2 +-
- drivers/clk/qcom/camcc-sdm845.c | 2 +-
- drivers/clk/qcom/camcc-sm6350.c | 2 +-
- drivers/clk/qcom/camcc-sm7150.c | 2 +-
- drivers/clk/qcom/camcc-sm8250.c | 2 +-
- drivers/clk/qcom/camcc-sm8450.c | 2 +-
- drivers/clk/qcom/camcc-sm8550.c | 2 +-
- drivers/clk/qcom/camcc-x1e80100.c | 2 +-
- drivers/clk/qcom/common.c | 7 +++----
- drivers/clk/qcom/common.h | 2 +-
- drivers/clk/qcom/dispcc-qcm2290.c | 2 +-
- drivers/clk/qcom/dispcc-sc7180.c | 2 +-
- drivers/clk/qcom/dispcc-sc7280.c | 2 +-
- drivers/clk/qcom/dispcc-sc8280xp.c | 2 +-
- drivers/clk/qcom/dispcc-sdm845.c | 2 +-
- drivers/clk/qcom/dispcc-sm6115.c | 2 +-
- drivers/clk/qcom/dispcc-sm6125.c | 2 +-
- drivers/clk/qcom/dispcc-sm6350.c | 2 +-
- drivers/clk/qcom/dispcc-sm6375.c | 2 +-
- drivers/clk/qcom/dispcc-sm7150.c | 2 +-
- drivers/clk/qcom/dispcc-sm8250.c | 2 +-
- drivers/clk/qcom/dispcc-sm8450.c | 2 +-
- drivers/clk/qcom/dispcc-sm8550.c | 2 +-
- drivers/clk/qcom/dispcc-sm8650.c | 2 +-
- drivers/clk/qcom/dispcc-x1e80100.c | 2 +-
- drivers/clk/qcom/ecpricc-qdu1000.c | 2 +-
- drivers/clk/qcom/gcc-ipq5018.c | 2 +-
- drivers/clk/qcom/gcc-ipq6018.c | 2 +-
- drivers/clk/qcom/gcc-ipq8074.c | 2 +-
- drivers/clk/qcom/gcc-mdm9607.c | 2 +-
- drivers/clk/qcom/gcc-mdm9615.c | 2 +-
- drivers/clk/qcom/gcc-msm8917.c | 2 +-
- drivers/clk/qcom/gcc-msm8939.c | 2 +-
- drivers/clk/qcom/gcc-msm8953.c | 2 +-
- drivers/clk/qcom/gcc-msm8976.c | 2 +-
- drivers/clk/qcom/gcc-msm8996.c | 2 +-
- drivers/clk/qcom/gcc-msm8998.c | 2 +-
- drivers/clk/qcom/gcc-qcm2290.c | 2 +-
- drivers/clk/qcom/gcc-qcs404.c | 2 +-
- drivers/clk/qcom/gcc-qdu1000.c | 2 +-
- drivers/clk/qcom/gcc-sa8775p.c | 2 +-
- drivers/clk/qcom/gcc-sc7180.c | 2 +-
- drivers/clk/qcom/gcc-sc7280.c | 2 +-
- drivers/clk/qcom/gcc-sc8180x.c | 2 +-
- drivers/clk/qcom/gcc-sc8280xp.c | 2 +-
- drivers/clk/qcom/gcc-sdm660.c | 2 +-
- drivers/clk/qcom/gcc-sdm845.c | 2 +-
- drivers/clk/qcom/gcc-sdx55.c | 2 +-
- drivers/clk/qcom/gcc-sdx65.c | 2 +-
- drivers/clk/qcom/gcc-sdx75.c | 2 +-
- drivers/clk/qcom/gcc-sm4450.c | 2 +-
- drivers/clk/qcom/gcc-sm6115.c | 2 +-
- drivers/clk/qcom/gcc-sm6125.c | 2 +-
- drivers/clk/qcom/gcc-sm6350.c | 2 +-
- drivers/clk/qcom/gcc-sm6375.c | 2 +-
- drivers/clk/qcom/gcc-sm7150.c | 2 +-
- drivers/clk/qcom/gcc-sm8150.c | 2 +-
- drivers/clk/qcom/gcc-sm8250.c | 2 +-
- drivers/clk/qcom/gcc-sm8350.c | 2 +-
- drivers/clk/qcom/gcc-sm8450.c | 2 +-
- drivers/clk/qcom/gcc-sm8550.c | 2 +-
- drivers/clk/qcom/gcc-sm8650.c | 2 +-
- drivers/clk/qcom/gcc-x1e80100.c | 2 +-
- drivers/clk/qcom/gpucc-msm8998.c | 2 +-
- drivers/clk/qcom/gpucc-sa8775p.c | 2 +-
- drivers/clk/qcom/gpucc-sc7180.c | 2 +-
- drivers/clk/qcom/gpucc-sc7280.c | 2 +-
- drivers/clk/qcom/gpucc-sc8280xp.c | 2 +-
- drivers/clk/qcom/gpucc-sdm660.c | 2 +-
- drivers/clk/qcom/gpucc-sdm845.c | 2 +-
- drivers/clk/qcom/gpucc-sm6115.c | 2 +-
- drivers/clk/qcom/gpucc-sm6125.c | 2 +-
- drivers/clk/qcom/gpucc-sm6350.c | 2 +-
- drivers/clk/qcom/gpucc-sm6375.c | 2 +-
- drivers/clk/qcom/gpucc-sm8150.c | 2 +-
- drivers/clk/qcom/gpucc-sm8250.c | 2 +-
- drivers/clk/qcom/gpucc-sm8350.c | 2 +-
- drivers/clk/qcom/gpucc-sm8450.c | 2 +-
- drivers/clk/qcom/gpucc-sm8550.c | 2 +-
- drivers/clk/qcom/gpucc-sm8650.c | 2 +-
- drivers/clk/qcom/gpucc-x1e80100.c | 2 +-
- drivers/clk/qcom/lcc-ipq806x.c | 2 +-
- drivers/clk/qcom/lcc-msm8960.c | 2 +-
- drivers/clk/qcom/lpassaudiocc-sc7280.c | 4 ++--
- drivers/clk/qcom/lpasscorecc-sc7180.c | 2 +-
- drivers/clk/qcom/lpasscorecc-sc7280.c | 2 +-
- drivers/clk/qcom/mmcc-msm8960.c | 2 +-
- drivers/clk/qcom/mmcc-msm8974.c | 2 +-
- drivers/clk/qcom/mmcc-msm8994.c | 2 +-
- drivers/clk/qcom/mmcc-msm8996.c | 2 +-
- drivers/clk/qcom/mmcc-msm8998.c | 2 +-
- drivers/clk/qcom/mmcc-sdm660.c | 2 +-
- drivers/clk/qcom/tcsrcc-sm8550.c | 2 +-
- drivers/clk/qcom/videocc-sc7180.c | 2 +-
- drivers/clk/qcom/videocc-sc7280.c | 2 +-
- drivers/clk/qcom/videocc-sdm845.c | 2 +-
- drivers/clk/qcom/videocc-sm7150.c | 2 +-
- drivers/clk/qcom/videocc-sm8150.c | 2 +-
- drivers/clk/qcom/videocc-sm8250.c | 2 +-
- drivers/clk/qcom/videocc-sm8350.c | 2 +-
- drivers/clk/qcom/videocc-sm8450.c | 2 +-
- drivers/clk/qcom/videocc-sm8550.c | 2 +-
- 105 files changed, 108 insertions(+), 109 deletions(-)
-
---- a/drivers/clk/qcom/apss-ipq6018.c
-+++ b/drivers/clk/qcom/apss-ipq6018.c
-@@ -89,7 +89,7 @@ static int apss_ipq6018_probe(struct pla
- if (!regmap)
- return -ENODEV;
-
-- return qcom_cc_really_probe(pdev, &apss_ipq6018_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &apss_ipq6018_desc, regmap);
- }
-
- static struct platform_driver apss_ipq6018_driver = {
---- a/drivers/clk/qcom/camcc-sc7280.c
-+++ b/drivers/clk/qcom/camcc-sc7280.c
-@@ -2462,7 +2462,7 @@ static int cam_cc_sc7280_probe(struct pl
- clk_lucid_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
-
-- return qcom_cc_really_probe(pdev, &cam_cc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sc7280_desc, regmap);
- }
-
- static struct platform_driver cam_cc_sc7280_driver = {
---- a/drivers/clk/qcom/camcc-sdm845.c
-+++ b/drivers/clk/qcom/camcc-sdm845.c
-@@ -1735,7 +1735,7 @@ static int cam_cc_sdm845_probe(struct pl
- cam_cc_pll_config.l = 0x14;
- clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll_config);
-
-- return qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sdm845_desc, regmap);
- }
-
- static struct platform_driver cam_cc_sdm845_driver = {
---- a/drivers/clk/qcom/camcc-sm6350.c
-+++ b/drivers/clk/qcom/camcc-sm6350.c
-@@ -1879,7 +1879,7 @@ static int camcc_sm6350_probe(struct pla
- clk_agera_pll_configure(&camcc_pll2, regmap, &camcc_pll2_config);
- clk_fabia_pll_configure(&camcc_pll3, regmap, &camcc_pll3_config);
-
-- return qcom_cc_really_probe(pdev, &camcc_sm6350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &camcc_sm6350_desc, regmap);
- }
-
- static struct platform_driver camcc_sm6350_driver = {
---- a/drivers/clk/qcom/camcc-sm8250.c
-+++ b/drivers/clk/qcom/camcc-sm8250.c
-@@ -2433,7 +2433,7 @@ static int cam_cc_sm8250_probe(struct pl
- clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
-
-- return qcom_cc_really_probe(pdev, &cam_cc_sm8250_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8250_desc, regmap);
- }
-
- static struct platform_driver cam_cc_sm8250_driver = {
---- a/drivers/clk/qcom/camcc-sm8450.c
-+++ b/drivers/clk/qcom/camcc-sm8450.c
-@@ -2839,7 +2839,7 @@ static int cam_cc_sm8450_probe(struct pl
- clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
-
-- return qcom_cc_really_probe(pdev, &cam_cc_sm8450_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
- }
-
- static struct platform_driver cam_cc_sm8450_driver = {
---- a/drivers/clk/qcom/common.c
-+++ b/drivers/clk/qcom/common.c
-@@ -250,11 +250,10 @@ static struct clk_hw *qcom_cc_clk_hw_get
- return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
- }
-
--int qcom_cc_really_probe(struct platform_device *pdev,
-+int qcom_cc_really_probe(struct device *dev,
- const struct qcom_cc_desc *desc, struct regmap *regmap)
- {
- int i, ret;
-- struct device *dev = &pdev->dev;
- struct qcom_reset_controller *reset;
- struct qcom_cc *cc;
- struct gdsc_desc *scd;
-@@ -331,7 +330,7 @@ int qcom_cc_probe(struct platform_device
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
-- return qcom_cc_really_probe(pdev, desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, desc, regmap);
- }
- EXPORT_SYMBOL_GPL(qcom_cc_probe);
-
-@@ -349,7 +348,7 @@ int qcom_cc_probe_by_index(struct platfo
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
-- return qcom_cc_really_probe(pdev, desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, desc, regmap);
- }
- EXPORT_SYMBOL_GPL(qcom_cc_probe_by_index);
-
---- a/drivers/clk/qcom/common.h
-+++ b/drivers/clk/qcom/common.h
-@@ -60,7 +60,7 @@ extern int qcom_cc_register_sleep_clk(st
-
- extern struct regmap *qcom_cc_map(struct platform_device *pdev,
- const struct qcom_cc_desc *desc);
--extern int qcom_cc_really_probe(struct platform_device *pdev,
-+extern int qcom_cc_really_probe(struct device *dev,
- const struct qcom_cc_desc *desc,
- struct regmap *regmap);
- extern int qcom_cc_probe(struct platform_device *pdev,
---- a/drivers/clk/qcom/dispcc-qcm2290.c
-+++ b/drivers/clk/qcom/dispcc-qcm2290.c
-@@ -522,7 +522,7 @@ static int disp_cc_qcm2290_probe(struct
- /* Keep DISP_CC_XO_CLK always-ON */
- regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &disp_cc_qcm2290_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_qcm2290_desc, regmap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
- return ret;
---- a/drivers/clk/qcom/dispcc-sc7180.c
-+++ b/drivers/clk/qcom/dispcc-sc7180.c
-@@ -713,7 +713,7 @@ static int disp_cc_sc7180_probe(struct p
-
- clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll_config);
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sc7180_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sc7180_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sc7180_driver = {
---- a/drivers/clk/qcom/dispcc-sc7280.c
-+++ b/drivers/clk/qcom/dispcc-sc7280.c
-@@ -884,7 +884,7 @@ static int disp_cc_sc7280_probe(struct p
- */
- regmap_update_bits(regmap, 0x5008, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sc7280_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sc7280_driver = {
---- a/drivers/clk/qcom/dispcc-sc8280xp.c
-+++ b/drivers/clk/qcom/dispcc-sc8280xp.c
-@@ -3172,7 +3172,7 @@ static int disp_cc_sc8280xp_probe(struct
- clk_lucid_pll_configure(clkr_to_alpha_clk_pll(desc->clks[DISP_CC_PLL1]), regmap, &disp_cc_pll1_config);
- clk_lucid_pll_configure(clkr_to_alpha_clk_pll(desc->clks[DISP_CC_PLL2]), regmap, &disp_cc_pll2_config);
-
-- ret = qcom_cc_really_probe(pdev, desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register display clock controller\n");
- goto out_pm_runtime_put;
---- a/drivers/clk/qcom/dispcc-sdm845.c
-+++ b/drivers/clk/qcom/dispcc-sdm845.c
-@@ -863,7 +863,7 @@ static int disp_cc_sdm845_probe(struct p
- /* Enable hardware clock gating for DSI and MDP clocks */
- regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sdm845_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sdm845_driver = {
---- a/drivers/clk/qcom/dispcc-sm6115.c
-+++ b/drivers/clk/qcom/dispcc-sm6115.c
-@@ -586,7 +586,7 @@ static int disp_cc_sm6115_probe(struct p
- /* Keep DISP_CC_XO_CLK always-ON */
- regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &disp_cc_sm6115_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6115_desc, regmap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
- return ret;
---- a/drivers/clk/qcom/dispcc-sm6125.c
-+++ b/drivers/clk/qcom/dispcc-sm6125.c
-@@ -682,7 +682,7 @@ static int disp_cc_sm6125_probe(struct p
-
- clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sm6125_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6125_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sm6125_driver = {
---- a/drivers/clk/qcom/dispcc-sm6350.c
-+++ b/drivers/clk/qcom/dispcc-sm6350.c
-@@ -760,7 +760,7 @@ static int disp_cc_sm6350_probe(struct p
-
- clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sm6350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6350_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sm6350_driver = {
---- a/drivers/clk/qcom/dispcc-sm6375.c
-+++ b/drivers/clk/qcom/dispcc-sm6375.c
-@@ -583,7 +583,7 @@ static int disp_cc_sm6375_probe(struct p
-
- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
-
-- return qcom_cc_really_probe(pdev, &disp_cc_sm6375_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6375_desc, regmap);
- }
-
- static struct platform_driver disp_cc_sm6375_driver = {
---- a/drivers/clk/qcom/dispcc-sm8250.c
-+++ b/drivers/clk/qcom/dispcc-sm8250.c
-@@ -1376,7 +1376,7 @@ static int disp_cc_sm8250_probe(struct p
- /* DISP_CC_XO_CLK always-on */
- regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8250_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
---- a/drivers/clk/qcom/dispcc-sm8450.c
-+++ b/drivers/clk/qcom/dispcc-sm8450.c
-@@ -1781,7 +1781,7 @@ static int disp_cc_sm8450_probe(struct p
- */
- regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8450_desc, regmap);
- if (ret)
- goto err_put_rpm;
-
---- a/drivers/clk/qcom/dispcc-sm8550.c
-+++ b/drivers/clk/qcom/dispcc-sm8550.c
-@@ -1774,7 +1774,7 @@ static int disp_cc_sm8550_probe(struct p
- */
- regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &disp_cc_sm8550_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8550_desc, regmap);
- if (ret)
- goto err_put_rpm;
-
---- a/drivers/clk/qcom/gcc-ipq5018.c
-+++ b/drivers/clk/qcom/gcc-ipq5018.c
-@@ -3697,7 +3697,7 @@ static int gcc_ipq5018_probe(struct plat
-
- clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
-
-- return qcom_cc_really_probe(pdev, &ipq5018_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &ipq5018_desc, regmap);
- }
-
- static struct platform_driver gcc_ipq5018_driver = {
---- a/drivers/clk/qcom/gcc-ipq6018.c
-+++ b/drivers/clk/qcom/gcc-ipq6018.c
-@@ -4604,7 +4604,7 @@ static int gcc_ipq6018_probe(struct plat
- clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
- &nss_crypto_pll_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_ipq6018_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_ipq6018_desc, regmap);
- }
-
- static struct platform_driver gcc_ipq6018_driver = {
---- a/drivers/clk/qcom/gcc-ipq8074.c
-+++ b/drivers/clk/qcom/gcc-ipq8074.c
-@@ -4728,7 +4728,7 @@ static int gcc_ipq8074_probe(struct plat
- clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
- &nss_crypto_pll_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_ipq8074_desc, regmap);
- }
-
- static struct platform_driver gcc_ipq8074_driver = {
---- a/drivers/clk/qcom/gcc-mdm9607.c
-+++ b/drivers/clk/qcom/gcc-mdm9607.c
-@@ -1604,7 +1604,7 @@ static int gcc_mdm9607_probe(struct plat
- /* Vote for GPLL0 to turn on. Needed by acpuclock. */
- regmap_update_bits(regmap, 0x45000, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gcc_mdm9607_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_mdm9607_desc, regmap);
- }
-
- static struct platform_driver gcc_mdm9607_driver = {
---- a/drivers/clk/qcom/gcc-mdm9615.c
-+++ b/drivers/clk/qcom/gcc-mdm9615.c
-@@ -1736,7 +1736,7 @@ static int gcc_mdm9615_probe(struct plat
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
-- return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_mdm9615_desc, regmap);
- }
-
- static struct platform_driver gcc_mdm9615_driver = {
---- a/drivers/clk/qcom/gcc-msm8917.c
-+++ b/drivers/clk/qcom/gcc-msm8917.c
-@@ -3270,7 +3270,7 @@ static int gcc_msm8917_probe(struct plat
-
- clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
-
-- return qcom_cc_really_probe(pdev, gcc_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, gcc_desc, regmap);
- }
-
- static const struct of_device_id gcc_msm8917_match_table[] = {
---- a/drivers/clk/qcom/gcc-msm8939.c
-+++ b/drivers/clk/qcom/gcc-msm8939.c
-@@ -4004,7 +4004,7 @@ static int gcc_msm8939_probe(struct plat
- clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
- clk_pll_configure_sr_hpm_lp(&gpll4, regmap, &gpll4_config, true);
-
-- return qcom_cc_really_probe(pdev, &gcc_msm8939_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_msm8939_desc, regmap);
- }
-
- static struct platform_driver gcc_msm8939_driver = {
---- a/drivers/clk/qcom/gcc-msm8953.c
-+++ b/drivers/clk/qcom/gcc-msm8953.c
-@@ -4216,7 +4216,7 @@ static int gcc_msm8953_probe(struct plat
-
- clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_msm8953_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_msm8953_desc, regmap);
- }
-
- static const struct of_device_id gcc_msm8953_match_table[] = {
---- a/drivers/clk/qcom/gcc-msm8976.c
-+++ b/drivers/clk/qcom/gcc-msm8976.c
-@@ -4129,7 +4129,7 @@ static int gcc_msm8976_probe(struct plat
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_msm8976_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_msm8976_desc, regmap);
- }
-
- static struct platform_driver gcc_msm8976_driver = {
---- a/drivers/clk/qcom/gcc-msm8996.c
-+++ b/drivers/clk/qcom/gcc-msm8996.c
-@@ -3620,7 +3620,7 @@ static int gcc_msm8996_probe(struct plat
- */
- regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
-
-- return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_msm8996_desc, regmap);
- }
-
- static struct platform_driver gcc_msm8996_driver = {
---- a/drivers/clk/qcom/gcc-msm8998.c
-+++ b/drivers/clk/qcom/gcc-msm8998.c
-@@ -3292,7 +3292,7 @@ static int gcc_msm8998_probe(struct plat
- regmap_write(regmap, GCC_MMSS_MISC, 0x10003);
- regmap_write(regmap, GCC_GPU_MISC, 0x10003);
-
-- return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_msm8998_desc, regmap);
- }
-
- static const struct of_device_id gcc_msm8998_match_table[] = {
---- a/drivers/clk/qcom/gcc-qcm2290.c
-+++ b/drivers/clk/qcom/gcc-qcm2290.c
-@@ -2994,7 +2994,7 @@ static int gcc_qcm2290_probe(struct plat
- clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
- clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_qcm2290_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcm2290_desc, regmap);
- }
-
- static struct platform_driver gcc_qcm2290_driver = {
---- a/drivers/clk/qcom/gcc-qcs404.c
-+++ b/drivers/clk/qcom/gcc-qcs404.c
-@@ -2825,7 +2825,7 @@ static int gcc_qcs404_probe(struct platf
-
- clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs404_desc, regmap);
- }
-
- static struct platform_driver gcc_qcs404_driver = {
---- a/drivers/clk/qcom/gcc-qdu1000.c
-+++ b/drivers/clk/qcom/gcc-qdu1000.c
-@@ -2674,7 +2674,7 @@ static int gcc_qdu1000_probe(struct plat
- if (ret)
- return ret;
-
-- ret = qcom_cc_really_probe(pdev, &gcc_qdu1000_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &gcc_qdu1000_desc, regmap);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "Failed to register GCC clocks\n");
-
---- a/drivers/clk/qcom/gcc-sa8775p.c
-+++ b/drivers/clk/qcom/gcc-sa8775p.c
-@@ -4798,7 +4798,7 @@ static int gcc_sa8775p_probe(struct plat
- regmap_update_bits(regmap, 0x34004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x34024, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gcc_sa8775p_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sa8775p_desc, regmap);
- }
-
- static struct platform_driver gcc_sa8775p_driver = {
---- a/drivers/clk/qcom/gcc-sc7180.c
-+++ b/drivers/clk/qcom/gcc-sc7180.c
-@@ -2462,7 +2462,7 @@ static int gcc_sc7180_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sc7180_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sc7180_desc, regmap);
- }
-
- static struct platform_driver gcc_sc7180_driver = {
---- a/drivers/clk/qcom/gcc-sc7280.c
-+++ b/drivers/clk/qcom/gcc-sc7280.c
-@@ -3475,7 +3475,7 @@ static int gcc_sc7280_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sc7280_desc, regmap);
- }
-
- static struct platform_driver gcc_sc7280_driver = {
---- a/drivers/clk/qcom/gcc-sc8180x.c
-+++ b/drivers/clk/qcom/gcc-sc8180x.c
-@@ -4555,7 +4555,7 @@ static int gcc_sc8180x_probe(struct plat
- regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
- regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
-
-- return qcom_cc_really_probe(pdev, &gcc_sc8180x_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sc8180x_desc, regmap);
- }
-
- static struct platform_driver gcc_sc8180x_driver = {
---- a/drivers/clk/qcom/gcc-sc8280xp.c
-+++ b/drivers/clk/qcom/gcc-sc8280xp.c
-@@ -7563,7 +7563,7 @@ static int gcc_sc8280xp_probe(struct pla
- if (ret)
- goto err_put_rpm;
-
-- ret = qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &gcc_sc8280xp_desc, regmap);
- if (ret)
- goto err_put_rpm;
-
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -2474,7 +2474,7 @@ static int gcc_sdm660_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sdm660_desc, regmap);
- }
-
- static struct platform_driver gcc_sdm660_driver = {
---- a/drivers/clk/qcom/gcc-sdm845.c
-+++ b/drivers/clk/qcom/gcc-sdm845.c
-@@ -4011,7 +4011,7 @@ static int gcc_sdm845_probe(struct platf
- return ret;
-
- gcc_desc = of_device_get_match_data(&pdev->dev);
-- return qcom_cc_really_probe(pdev, gcc_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, gcc_desc, regmap);
- }
-
- static struct platform_driver gcc_sdm845_driver = {
---- a/drivers/clk/qcom/gcc-sdx55.c
-+++ b/drivers/clk/qcom/gcc-sdx55.c
-@@ -1620,7 +1620,7 @@ static int gcc_sdx55_probe(struct platfo
- regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
- regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
-
-- return qcom_cc_really_probe(pdev, &gcc_sdx55_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sdx55_desc, regmap);
- }
-
- static struct platform_driver gcc_sdx55_driver = {
---- a/drivers/clk/qcom/gcc-sdx65.c
-+++ b/drivers/clk/qcom/gcc-sdx65.c
-@@ -1583,7 +1583,7 @@ static int gcc_sdx65_probe(struct platfo
- regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
- regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
-
-- return qcom_cc_really_probe(pdev, &gcc_sdx65_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sdx65_desc, regmap);
- }
-
- static struct platform_driver gcc_sdx65_driver = {
---- a/drivers/clk/qcom/gcc-sdx75.c
-+++ b/drivers/clk/qcom/gcc-sdx75.c
-@@ -2944,7 +2944,7 @@ static int gcc_sdx75_probe(struct platfo
- regmap_update_bits(regmap, 0x3e004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x3e008, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gcc_sdx75_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sdx75_desc, regmap);
- }
-
- static struct platform_driver gcc_sdx75_driver = {
---- a/drivers/clk/qcom/gcc-sm6115.c
-+++ b/drivers/clk/qcom/gcc-sm6115.c
-@@ -3513,7 +3513,7 @@ static int gcc_sm6115_probe(struct platf
- clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
- clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_sm6115_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm6115_desc, regmap);
- }
-
- static struct platform_driver gcc_sm6115_driver = {
---- a/drivers/clk/qcom/gcc-sm6125.c
-+++ b/drivers/clk/qcom/gcc-sm6125.c
-@@ -4161,7 +4161,7 @@ static int gcc_sm6125_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sm6125_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm6125_desc, regmap);
- }
-
- static struct platform_driver gcc_sm6125_driver = {
---- a/drivers/clk/qcom/gcc-sm6350.c
-+++ b/drivers/clk/qcom/gcc-sm6350.c
-@@ -2565,7 +2565,7 @@ static int gcc_sm6350_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm6350_desc, regmap);
- }
-
- static struct platform_driver gcc_sm6350_driver = {
---- a/drivers/clk/qcom/gcc-sm6375.c
-+++ b/drivers/clk/qcom/gcc-sm6375.c
-@@ -3895,7 +3895,7 @@ static int gcc_sm6375_probe(struct platf
- clk_lucid_pll_configure(&gpll8, regmap, &gpll8_config);
- clk_zonda_pll_configure(&gpll9, regmap, &gpll9_config);
-
-- return qcom_cc_really_probe(pdev, &gcc_sm6375_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm6375_desc, regmap);
- }
-
- static struct platform_driver gcc_sm6375_driver = {
---- a/drivers/clk/qcom/gcc-sm7150.c
-+++ b/drivers/clk/qcom/gcc-sm7150.c
-@@ -3022,7 +3022,7 @@ static int gcc_sm7150_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sm7150_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm7150_desc, regmap);
- }
-
- static struct platform_driver gcc_sm7150_driver = {
---- a/drivers/clk/qcom/gcc-sm8150.c
-+++ b/drivers/clk/qcom/gcc-sm8150.c
-@@ -3786,7 +3786,7 @@ static int gcc_sm8150_probe(struct platf
- regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
- regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
-
-- return qcom_cc_really_probe(pdev, &gcc_sm8150_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8150_desc, regmap);
- }
-
- static struct platform_driver gcc_sm8150_driver = {
---- a/drivers/clk/qcom/gcc-sm8250.c
-+++ b/drivers/clk/qcom/gcc-sm8250.c
-@@ -3661,7 +3661,7 @@ static int gcc_sm8250_probe(struct platf
- if (ret)
- return ret;
-
-- return qcom_cc_really_probe(pdev, &gcc_sm8250_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8250_desc, regmap);
- }
-
- static struct platform_driver gcc_sm8250_driver = {
---- a/drivers/clk/qcom/gcc-sm8350.c
-+++ b/drivers/clk/qcom/gcc-sm8350.c
-@@ -3826,7 +3826,7 @@ static int gcc_sm8350_probe(struct platf
- /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
- regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
-
-- return qcom_cc_really_probe(pdev, &gcc_sm8350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8350_desc, regmap);
- }
-
- static struct platform_driver gcc_sm8350_driver = {
---- a/drivers/clk/qcom/gcc-sm8450.c
-+++ b/drivers/clk/qcom/gcc-sm8450.c
-@@ -3294,7 +3294,7 @@ static int gcc_sm8450_probe(struct platf
- regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x42028, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gcc_sm8450_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8450_desc, regmap);
- }
-
- static struct platform_driver gcc_sm8450_driver = {
---- a/drivers/clk/qcom/gcc-sm8550.c
-+++ b/drivers/clk/qcom/gcc-sm8550.c
-@@ -3369,7 +3369,7 @@ static int gcc_sm8550_probe(struct platf
- /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
- regmap_write(regmap, 0x52024, 0x0);
-
-- return qcom_cc_really_probe(pdev, &gcc_sm8550_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8550_desc, regmap);
- }
-
- static struct platform_driver gcc_sm8550_driver = {
---- a/drivers/clk/qcom/gpucc-msm8998.c
-+++ b/drivers/clk/qcom/gpucc-msm8998.c
-@@ -334,7 +334,7 @@ static int gpucc_msm8998_probe(struct pl
- /* tweak droop detector (GPUCC_GPU_DD_WRAP_CTRL) to reduce leakage */
- regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gpucc_msm8998_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpucc_msm8998_desc, regmap);
- }
-
- static struct platform_driver gpucc_msm8998_driver = {
---- a/drivers/clk/qcom/gpucc-sa8775p.c
-+++ b/drivers/clk/qcom/gpucc-sa8775p.c
-@@ -599,7 +599,7 @@ static int gpu_cc_sa8775p_probe(struct p
- clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
- clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sa8775p_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sa8775p_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sa8775p_driver = {
---- a/drivers/clk/qcom/gpucc-sc7180.c
-+++ b/drivers/clk/qcom/gpucc-sc7180.c
-@@ -241,7 +241,7 @@ static int gpu_cc_sc7180_probe(struct pl
- value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
- regmap_update_bits(regmap, 0x1098, mask, value);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc7180_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sc7180_driver = {
---- a/drivers/clk/qcom/gpucc-sc7280.c
-+++ b/drivers/clk/qcom/gpucc-sc7280.c
-@@ -465,7 +465,7 @@ static int gpu_cc_sc7280_probe(struct pl
- regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13));
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc7280_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sc7280_driver = {
---- a/drivers/clk/qcom/gpucc-sc8280xp.c
-+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
-@@ -451,7 +451,7 @@ static int gpu_cc_sc8280xp_probe(struct
- regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x109c, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc8280xp_desc, regmap);
- pm_runtime_put(&pdev->dev);
-
- return ret;
---- a/drivers/clk/qcom/gpucc-sdm660.c
-+++ b/drivers/clk/qcom/gpucc-sdm660.c
-@@ -330,7 +330,7 @@ static int gpucc_sdm660_probe(struct pla
- gpu_pll_config.alpha_hi = 0x8a;
- clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap, &gpu_pll_config);
-
-- return qcom_cc_really_probe(pdev, &gpucc_sdm660_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpucc_sdm660_desc, regmap);
- }
-
- static struct platform_driver gpucc_sdm660_driver = {
---- a/drivers/clk/qcom/gpucc-sdm845.c
-+++ b/drivers/clk/qcom/gpucc-sdm845.c
-@@ -192,7 +192,7 @@ static int gpu_cc_sdm845_probe(struct pl
- value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
- regmap_update_bits(regmap, 0x1098, mask, value);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sdm845_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sdm845_driver = {
---- a/drivers/clk/qcom/gpucc-sm6115.c
-+++ b/drivers/clk/qcom/gpucc-sm6115.c
-@@ -488,7 +488,7 @@ static int gpu_cc_sm6115_probe(struct pl
- qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
- qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm6115_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6115_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm6115_driver = {
---- a/drivers/clk/qcom/gpucc-sm6125.c
-+++ b/drivers/clk/qcom/gpucc-sm6125.c
-@@ -409,7 +409,7 @@ static int gpu_cc_sm6125_probe(struct pl
- qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
- qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm6125_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6125_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm6125_driver = {
---- a/drivers/clk/qcom/gpucc-sm6350.c
-+++ b/drivers/clk/qcom/gpucc-sm6350.c
-@@ -502,7 +502,7 @@ static int gpu_cc_sm6350_probe(struct pl
- value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
- regmap_update_bits(regmap, 0x1098, mask, value);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm6350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6350_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm6350_driver = {
---- a/drivers/clk/qcom/gpucc-sm6375.c
-+++ b/drivers/clk/qcom/gpucc-sm6375.c
-@@ -455,7 +455,7 @@ static int gpucc_sm6375_probe(struct pla
- clk_lucid_pll_configure(&gpucc_pll0, regmap, &gpucc_pll0_config);
- clk_lucid_pll_configure(&gpucc_pll1, regmap, &gpucc_pll1_config);
-
-- ret = qcom_cc_really_probe(pdev, &gpucc_sm6375_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &gpucc_sm6375_desc, regmap);
- pm_runtime_put(&pdev->dev);
-
- return ret;
---- a/drivers/clk/qcom/gpucc-sm8150.c
-+++ b/drivers/clk/qcom/gpucc-sm8150.c
-@@ -304,7 +304,7 @@ static int gpu_cc_sm8150_probe(struct pl
-
- clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8150_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm8150_driver = {
---- a/drivers/clk/qcom/gpucc-sm8250.c
-+++ b/drivers/clk/qcom/gpucc-sm8250.c
-@@ -320,7 +320,7 @@ static int gpu_cc_sm8250_probe(struct pl
- value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
- regmap_update_bits(regmap, 0x1098, mask, value);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm8250_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8250_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm8250_driver = {
---- a/drivers/clk/qcom/gpucc-sm8350.c
-+++ b/drivers/clk/qcom/gpucc-sm8350.c
-@@ -605,7 +605,7 @@ static int gpu_cc_sm8350_probe(struct pl
- clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
- clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm8350_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8350_desc, regmap);
- }
-
- static const struct of_device_id gpu_cc_sm8350_match_table[] = {
---- a/drivers/clk/qcom/gpucc-sm8450.c
-+++ b/drivers/clk/qcom/gpucc-sm8450.c
-@@ -751,7 +751,7 @@ static int gpu_cc_sm8450_probe(struct pl
- clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
- clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm8450_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8450_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm8450_driver = {
---- a/drivers/clk/qcom/gpucc-sm8550.c
-+++ b/drivers/clk/qcom/gpucc-sm8550.c
-@@ -585,7 +585,7 @@ static int gpu_cc_sm8550_probe(struct pl
- regmap_update_bits(regmap, 0x9004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x900c, BIT(0), BIT(0));
-
-- return qcom_cc_really_probe(pdev, &gpu_cc_sm8550_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8550_desc, regmap);
- }
-
- static struct platform_driver gpu_cc_sm8550_driver = {
---- a/drivers/clk/qcom/lcc-ipq806x.c
-+++ b/drivers/clk/qcom/lcc-ipq806x.c
-@@ -454,7 +454,7 @@ static int lcc_ipq806x_probe(struct plat
- /* Enable PLL4 source on the LPASS Primary PLL Mux */
- regmap_write(regmap, 0xc4, 0x1);
-
-- return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &lcc_ipq806x_desc, regmap);
- }
-
- static struct platform_driver lcc_ipq806x_driver = {
---- a/drivers/clk/qcom/lcc-msm8960.c
-+++ b/drivers/clk/qcom/lcc-msm8960.c
-@@ -481,7 +481,7 @@ static int lcc_msm8960_probe(struct plat
- /* Enable PLL4 source on the LPASS Primary PLL Mux */
- regmap_write(regmap, 0xc4, 0x1);
-
-- return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &lcc_msm8960_desc, regmap);
- }
-
- static struct platform_driver lcc_msm8960_driver = {
---- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
-+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
-@@ -772,7 +772,7 @@ static int lpass_audio_cc_sc7280_probe(s
- regmap_write(regmap, 0x4, 0x3b);
- regmap_write(regmap, 0x8, 0xff05);
-
-- ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
- goto exit;
-@@ -847,7 +847,7 @@ static int lpass_aon_cc_sc7280_probe(str
-
- clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config);
-
-- ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &lpass_aon_cc_sc7280_desc, regmap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n");
- goto exit;
---- a/drivers/clk/qcom/lpasscorecc-sc7180.c
-+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
-@@ -414,7 +414,7 @@ static int lpass_core_cc_sc7180_probe(st
- clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
- &lpass_lpaaudio_dig_pll_config);
-
-- ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7180_desc, regmap);
-
- pm_runtime_mark_last_busy(&pdev->dev);
- exit:
---- a/drivers/clk/qcom/lpasscorecc-sc7280.c
-+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
-@@ -406,7 +406,7 @@ static int lpass_core_cc_sc7280_probe(st
-
- clk_lucid_pll_configure(&lpass_core_cc_dig_pll, regmap, &lpass_core_cc_dig_pll_config);
-
-- return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7280_desc, regmap);
- }
-
- static struct platform_driver lpass_core_cc_sc7280_driver = {
---- a/drivers/clk/qcom/mmcc-msm8960.c
-+++ b/drivers/clk/qcom/mmcc-msm8960.c
-@@ -3128,7 +3128,7 @@ static int mmcc_msm8960_probe(struct pla
-
- clk_pll_configure_sr(&pll15, regmap, &pll15_config, false);
-
-- return qcom_cc_really_probe(pdev, match->data, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, match->data, regmap);
- }
-
- static struct platform_driver mmcc_msm8960_driver = {
---- a/drivers/clk/qcom/mmcc-msm8974.c
-+++ b/drivers/clk/qcom/mmcc-msm8974.c
-@@ -2786,7 +2786,7 @@ static int mmcc_msm8974_probe(struct pla
- msm8226_clock_override();
- }
-
-- return qcom_cc_really_probe(pdev, desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, desc, regmap);
- }
-
- static struct platform_driver mmcc_msm8974_driver = {
---- a/drivers/clk/qcom/mmcc-msm8994.c
-+++ b/drivers/clk/qcom/mmcc-msm8994.c
-@@ -2602,7 +2602,7 @@ static int mmcc_msm8994_probe(struct pla
- clk_alpha_pll_configure(&mmpll3_early, regmap, &mmpll_p_config);
- clk_alpha_pll_configure(&mmpll5_early, regmap, &mmpll_p_config);
-
-- return qcom_cc_really_probe(pdev, &mmcc_msm8994_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8994_desc, regmap);
- }
-
- static struct platform_driver mmcc_msm8994_driver = {
---- a/drivers/clk/qcom/mmcc-msm8996.c
-+++ b/drivers/clk/qcom/mmcc-msm8996.c
-@@ -3626,7 +3626,7 @@ static int mmcc_msm8996_probe(struct pla
- /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
- regmap_update_bits(regmap, 0x5054, BIT(15), 0);
-
-- return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8996_desc, regmap);
- }
-
- static struct platform_driver mmcc_msm8996_driver = {
---- a/drivers/clk/qcom/mmcc-msm8998.c
-+++ b/drivers/clk/qcom/mmcc-msm8998.c
-@@ -2866,7 +2866,7 @@ static int mmcc_msm8998_probe(struct pla
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
-- return qcom_cc_really_probe(pdev, &mmcc_msm8998_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8998_desc, regmap);
- }
-
- static struct platform_driver mmcc_msm8998_driver = {
---- a/drivers/clk/qcom/mmcc-sdm660.c
-+++ b/drivers/clk/qcom/mmcc-sdm660.c
-@@ -2851,7 +2851,7 @@ static int mmcc_660_probe(struct platfor
- clk_alpha_pll_configure(&mmpll8, regmap, &mmpll8_config);
- clk_alpha_pll_configure(&mmpll10, regmap, &mmpll10_config);
-
-- return qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &mmcc_660_desc, regmap);
- }
-
- static struct platform_driver mmcc_660_driver = {
---- a/drivers/clk/qcom/tcsrcc-sm8550.c
-+++ b/drivers/clk/qcom/tcsrcc-sm8550.c
-@@ -180,7 +180,7 @@ static int tcsr_cc_sm8550_probe(struct p
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
-- return qcom_cc_really_probe(pdev, &tcsr_cc_sm8550_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &tcsr_cc_sm8550_desc, regmap);
- }
-
- static struct platform_driver tcsr_cc_sm8550_driver = {
---- a/drivers/clk/qcom/videocc-sc7180.c
-+++ b/drivers/clk/qcom/videocc-sc7180.c
-@@ -226,7 +226,7 @@ static int video_cc_sc7180_probe(struct
- /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
- regmap_update_bits(regmap, 0x984, 0x1, 0x1);
-
-- return qcom_cc_really_probe(pdev, &video_cc_sc7180_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sc7180_desc, regmap);
- }
-
- static struct platform_driver video_cc_sc7180_driver = {
---- a/drivers/clk/qcom/videocc-sc7280.c
-+++ b/drivers/clk/qcom/videocc-sc7280.c
-@@ -298,7 +298,7 @@ static int video_cc_sc7280_probe(struct
-
- clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
-
-- return qcom_cc_really_probe(pdev, &video_cc_sc7280_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sc7280_desc, regmap);
- }
-
- static struct platform_driver video_cc_sc7280_driver = {
---- a/drivers/clk/qcom/videocc-sdm845.c
-+++ b/drivers/clk/qcom/videocc-sdm845.c
-@@ -329,7 +329,7 @@ static int video_cc_sdm845_probe(struct
-
- clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
-
-- return qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sdm845_desc, regmap);
- }
-
- static struct platform_driver video_cc_sdm845_driver = {
---- a/drivers/clk/qcom/videocc-sm8150.c
-+++ b/drivers/clk/qcom/videocc-sm8150.c
-@@ -250,7 +250,7 @@ static int video_cc_sm8150_probe(struct
- /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
- regmap_update_bits(regmap, 0x984, 0x1, 0x1);
-
-- return qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap);
-+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sm8150_desc, regmap);
- }
-
- static struct platform_driver video_cc_sm8150_driver = {
---- a/drivers/clk/qcom/videocc-sm8250.c
-+++ b/drivers/clk/qcom/videocc-sm8250.c
-@@ -387,7 +387,7 @@ static int video_cc_sm8250_probe(struct
- regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8250_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
---- a/drivers/clk/qcom/videocc-sm8350.c
-+++ b/drivers/clk/qcom/videocc-sm8350.c
-@@ -566,7 +566,7 @@ static int video_cc_sm8350_probe(struct
- regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
- regmap_update_bits(regmap, video_cc_xo_clk_cbcr, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &video_cc_sm8350_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8350_desc, regmap);
- pm_runtime_put(&pdev->dev);
-
- return ret;
---- a/drivers/clk/qcom/videocc-sm8450.c
-+++ b/drivers/clk/qcom/videocc-sm8450.c
-@@ -433,7 +433,7 @@ static int video_cc_sm8450_probe(struct
- regmap_update_bits(regmap, 0x8130, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8114, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &video_cc_sm8450_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8450_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
---- a/drivers/clk/qcom/videocc-sm8550.c
-+++ b/drivers/clk/qcom/videocc-sm8550.c
-@@ -440,7 +440,7 @@ static int video_cc_sm8550_probe(struct
- regmap_update_bits(regmap, 0x8140, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8124, BIT(0), BIT(0));
-
-- ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap);
-+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
+++ /dev/null
-From 777b8afb8179155353ec14b1d8153122410aba29 Mon Sep 17 00:00:00 2001
-From: Vladimir Oltean <vladimir.oltean@nxp.com>
-Date: Sat, 15 Jun 2024 20:00:27 +0800
-Subject: [PATCH] net: phy: introduce core support for phy-mode = "10g-qxgmii"
-
-10G-QXGMII is a MAC-to-PHY interface defined by the USXGMII multiport
-specification. It uses the same signaling as USXGMII, but it multiplexes
-4 ports over the link, resulting in a maximum speed of 2.5G per port.
-
-Some in-tree SoCs like the NXP LS1028A use "usxgmii" when they mean
-either the single-port USXGMII or the quad-port 10G-QXGMII variant, and
-they could get away just fine with that thus far. But there is a need to
-distinguish between the 2 as far as SerDes drivers are concerned.
-
-Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Reviewed-by: Andrew Lunn <andrew@lunn.ch>
-Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
----
- Documentation/networking/phy.rst | 6 ++++++
- drivers/net/phy/phy-core.c | 1 +
- drivers/net/phy/phylink.c | 9 ++++++++-
- include/linux/phy.h | 4 ++++
- include/linux/phylink.h | 1 +
- 5 files changed, 20 insertions(+), 1 deletion(-)
-
---- a/Documentation/networking/phy.rst
-+++ b/Documentation/networking/phy.rst
-@@ -327,6 +327,12 @@ Some of the interface modes are describe
- This is the Penta SGMII mode, it is similar to QSGMII but it combines 5
- SGMII lines into a single link compared to 4 on QSGMII.
-
-+``PHY_INTERFACE_MODE_10G_QXGMII``
-+ Represents the 10G-QXGMII PHY-MAC interface as defined by the Cisco USXGMII
-+ Multiport Copper Interface document. It supports 4 ports over a 10.3125 GHz
-+ SerDes lane, each port having speeds of 2.5G / 1G / 100M / 10M achieved
-+ through symbol replication. The PCS expects the standard USXGMII code word.
-+
- Pause frames / flow control
- ===========================
-
---- a/drivers/net/phy/phy-core.c
-+++ b/drivers/net/phy/phy-core.c
-@@ -141,6 +141,7 @@ int phy_interface_num_ports(phy_interfac
- return 1;
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- return 4;
- case PHY_INTERFACE_MODE_PSGMII:
- return 5;
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -231,6 +231,7 @@ static int phylink_interface_max_speed(p
- return SPEED_1000;
-
- case PHY_INTERFACE_MODE_2500BASEX:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- return SPEED_2500;
-
- case PHY_INTERFACE_MODE_5GBASER:
-@@ -500,7 +501,11 @@ unsigned long phylink_get_capabilities(p
-
- switch (interface) {
- case PHY_INTERFACE_MODE_USXGMII:
-- caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
-+ caps |= MAC_10000FD | MAC_5000FD;
-+ fallthrough;
-+
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ caps |= MAC_2500FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_RGMII_TXID:
-@@ -956,6 +961,7 @@ static int phylink_parse_mode(struct phy
- phylink_set(pl->supported, 25000baseSR_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- case PHY_INTERFACE_MODE_10GKR:
- case PHY_INTERFACE_MODE_10GBASER:
- phylink_set(pl->supported, 10baseT_Half);
---- a/include/linux/phy.h
-+++ b/include/linux/phy.h
-@@ -125,6 +125,7 @@ extern const int phy_10gbit_features_arr
- * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
- * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
- * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
-+ * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
- * @PHY_INTERFACE_MODE_MAX: Book keeping
- *
- * Describes the interface between the MAC and PHY.
-@@ -165,6 +166,7 @@ typedef enum {
- PHY_INTERFACE_MODE_10GKR,
- PHY_INTERFACE_MODE_QUSGMII,
- PHY_INTERFACE_MODE_1000BASEKX,
-+ PHY_INTERFACE_MODE_10G_QXGMII,
- PHY_INTERFACE_MODE_MAX,
- } phy_interface_t;
-
-@@ -286,6 +288,8 @@ static inline const char *phy_modes(phy_
- return "100base-x";
- case PHY_INTERFACE_MODE_QUSGMII:
- return "qusgmii";
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ return "10g-qxgmii";
- default:
- return "unknown";
- }
---- a/include/linux/phylink.h
-+++ b/include/linux/phylink.h
-@@ -128,6 +128,7 @@ static inline unsigned int phylink_pcs_n
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- /* These protocols are designed for use with a PHY which
- * communicates its negotiation result back to the MAC via
- * inband communication. Note: there exist PHYs that run
-@@ -714,6 +715,7 @@ static inline int phylink_get_link_timer
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- return 1600000;
-
- case PHY_INTERFACE_MODE_1000BASEX:
+++ /dev/null
-From 265b07df758a998f60cf5b5aec6bd72ca676655e Mon Sep 17 00:00:00 2001
-From: Shradha Todi <shradha.t@samsung.com>
-Date: Tue, 20 Feb 2024 14:10:45 +0530
-Subject: [PATCH] clk: Provide managed helper to get and enable bulk clocks
-
-Provide a managed devm_clk_bulk* wrapper to get and enable all
-bulk clocks in order to simplify drivers that keeps all clocks
-enabled for the time of driver operation.
-
-Suggested-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Signed-off-by: Shradha Todi <shradha.t@samsung.com>
-Link: https://lore.kernel.org/r/20240220084046.23786-2-shradha.t@samsung.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-devres.c | 40 ++++++++++++++++++++++++++++++++++++++++
- include/linux/clk.h | 22 ++++++++++++++++++++++
- 2 files changed, 62 insertions(+)
-
---- a/drivers/clk/clk-devres.c
-+++ b/drivers/clk/clk-devres.c
-@@ -182,6 +182,46 @@ int __must_check devm_clk_bulk_get_all(s
- }
- EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
-
-+static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
-+{
-+ struct clk_bulk_devres *devres = res;
-+
-+ clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
-+ clk_bulk_put_all(devres->num_clks, devres->clks);
-+}
-+
-+int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-+ struct clk_bulk_data **clks)
-+{
-+ struct clk_bulk_devres *devres;
-+ int ret;
-+
-+ devres = devres_alloc(devm_clk_bulk_release_all_enable,
-+ sizeof(*devres), GFP_KERNEL);
-+ if (!devres)
-+ return -ENOMEM;
-+
-+ ret = clk_bulk_get_all(dev, &devres->clks);
-+ if (ret > 0) {
-+ *clks = devres->clks;
-+ devres->num_clks = ret;
-+ } else {
-+ devres_free(devres);
-+ return ret;
-+ }
-+
-+ ret = clk_bulk_prepare_enable(devres->num_clks, *clks);
-+ if (!ret) {
-+ devres_add(dev, devres);
-+ } else {
-+ clk_bulk_put_all(devres->num_clks, devres->clks);
-+ devres_free(devres);
-+ }
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
-+
- static int devm_clk_match(struct device *dev, void *res, void *data)
- {
- struct clk **c = res;
---- a/include/linux/clk.h
-+++ b/include/linux/clk.h
-@@ -479,6 +479,22 @@ int __must_check devm_clk_bulk_get_all(s
- struct clk_bulk_data **clks);
-
- /**
-+ * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
-+ * @dev: device for clock "consumer"
-+ * @clks: pointer to the clk_bulk_data table of consumer
-+ *
-+ * Returns success (0) or negative errno.
-+ *
-+ * This helper function allows drivers to get all clocks of the
-+ * consumer and enables them in one operation with management.
-+ * The clks will automatically be disabled and freed when the device
-+ * is unbound.
-+ */
-+
-+int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-+ struct clk_bulk_data **clks);
-+
-+/**
- * devm_clk_get - lookup and obtain a managed reference to a clock producer.
- * @dev: device for clock "consumer"
- * @id: clock consumer ID
-@@ -968,6 +984,12 @@ static inline int __must_check devm_clk_
- return 0;
- }
-
-+static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-+ struct clk_bulk_data **clks)
-+{
-+ return 0;
-+}
-+
- static inline struct clk *devm_get_clk_from_child(struct device *dev,
- struct device_node *np, const char *con_id)
- {
+++ /dev/null
-From 51e32e897539663957f7a0950f66b48f8896efee Mon Sep 17 00:00:00 2001
-From: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
-Date: Sat, 19 Oct 2024 14:16:00 +0300
-Subject: [PATCH] clk: Provide devm_clk_bulk_get_all_enabled() helper
-
-Commit 265b07df758a ("clk: Provide managed helper to get and enable bulk
-clocks") added devm_clk_bulk_get_all_enable() function, but missed to
-return the number of clocks stored in the clk_bulk_data table referenced
-by the clks argument. Without knowing the number, it's not possible to
-iterate these clocks when needed, hence the argument is useless and
-could have been simply removed.
-
-Introduce devm_clk_bulk_get_all_enabled() variant, which is consistent
-with devm_clk_bulk_get_all() in terms of the returned value:
-
- > 0 if one or more clocks have been stored
- = 0 if there are no clocks
- < 0 if an error occurred
-
-Moreover, the naming is consistent with devm_clk_get_enabled(), i.e. use
-the past form of 'enable'.
-
-To reduce code duplication and improve patch readability, make
-devm_clk_bulk_get_all_enable() use the new helper, as suggested by
-Stephen Boyd.
-
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
-Link: https://lore.kernel.org/r/20241019-clk_bulk_ena_fix-v4-1-57f108f64e70@collabora.com
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
----
- drivers/clk/clk-devres.c | 9 +++++----
- include/linux/clk.h | 21 ++++++++++++++++-----
- 2 files changed, 21 insertions(+), 9 deletions(-)
-
---- a/drivers/clk/clk-devres.c
-+++ b/drivers/clk/clk-devres.c
-@@ -190,8 +190,8 @@ static void devm_clk_bulk_release_all_en
- clk_bulk_put_all(devres->num_clks, devres->clks);
- }
-
--int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-- struct clk_bulk_data **clks)
-+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
-+ struct clk_bulk_data **clks)
- {
- struct clk_bulk_devres *devres;
- int ret;
-@@ -216,11 +216,12 @@ int __must_check devm_clk_bulk_get_all_e
- } else {
- clk_bulk_put_all(devres->num_clks, devres->clks);
- devres_free(devres);
-+ return ret;
- }
-
-- return ret;
-+ return devres->num_clks;
- }
--EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
-+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
-
- static int devm_clk_match(struct device *dev, void *res, void *data)
- {
---- a/include/linux/clk.h
-+++ b/include/linux/clk.h
-@@ -479,11 +479,13 @@ int __must_check devm_clk_bulk_get_all(s
- struct clk_bulk_data **clks);
-
- /**
-- * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
-+ * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
- * @dev: device for clock "consumer"
- * @clks: pointer to the clk_bulk_data table of consumer
- *
-- * Returns success (0) or negative errno.
-+ * Returns a positive value for the number of clocks obtained while the
-+ * clock references are stored in the clk_bulk_data table in @clks field.
-+ * Returns 0 if there're none and a negative value if something failed.
- *
- * This helper function allows drivers to get all clocks of the
- * consumer and enables them in one operation with management.
-@@ -491,8 +493,8 @@ int __must_check devm_clk_bulk_get_all(s
- * is unbound.
- */
-
--int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-- struct clk_bulk_data **clks);
-+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
-+ struct clk_bulk_data **clks);
-
- /**
- * devm_clk_get - lookup and obtain a managed reference to a clock producer.
-@@ -984,7 +986,7 @@ static inline int __must_check devm_clk_
- return 0;
- }
-
--static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
-+static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
- struct clk_bulk_data **clks)
- {
- return 0;
-@@ -1086,6 +1088,15 @@ static inline void clk_restore_context(v
-
- #endif
-
-+/* Deprecated. Use devm_clk_bulk_get_all_enabled() */
-+static inline int __must_check
-+devm_clk_bulk_get_all_enable(struct device *dev, struct clk_bulk_data **clks)
-+{
-+ int ret = devm_clk_bulk_get_all_enabled(dev, clks);
-+
-+ return ret > 0 ? 0 : ret;
-+}
-+
- /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
- static inline int clk_prepare_enable(struct clk *clk)
- {
+++ /dev/null
-From 475beea0b9f631656b5cc39429a39696876af613 Mon Sep 17 00:00:00 2001
-From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Date: Tue, 30 Apr 2024 23:07:43 -0500
-Subject: [PATCH] dt-bindings: clock: Add PCIe pipe related clocks for IPQ9574
-
-Add defines for the missing PCIe PIPE clocks.
-
-Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Link: https://lore.kernel.org/r/20240501040800.1542805-2-mr.nuke.me@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- include/dt-bindings/clock/qcom,ipq9574-gcc.h | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
-+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
-@@ -216,4 +216,8 @@
- #define GCC_CRYPTO_AHB_CLK 207
- #define GCC_USB0_PIPE_CLK 208
- #define GCC_USB0_SLEEP_CLK 209
-+#define GCC_PCIE0_PIPE_CLK 210
-+#define GCC_PCIE1_PIPE_CLK 211
-+#define GCC_PCIE2_PIPE_CLK 212
-+#define GCC_PCIE3_PIPE_CLK 213
- #endif
+++ /dev/null
-From a8fe85d40ffe5ec0fd2f557932ffee902be35b38 Mon Sep 17 00:00:00 2001
-From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Date: Tue, 30 Apr 2024 23:07:44 -0500
-Subject: [PATCH] clk: qcom: gcc-ipq9574: Add PCIe pipe clocks
-
-The IPQ9574 has four PCIe "pipe" clocks. These clocks are required by
-PCIe PHYs. Port the pipe clocks from the downstream 5.4 kernel.
-
-Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Link: https://lore.kernel.org/r/20240501040800.1542805-3-mr.nuke.me@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/gcc-ipq9574.c | 76 ++++++++++++++++++++++++++++++++++
- 1 file changed, 76 insertions(+)
-
---- a/drivers/clk/qcom/gcc-ipq9574.c
-+++ b/drivers/clk/qcom/gcc-ipq9574.c
-@@ -1569,6 +1569,24 @@ static struct clk_regmap_phy_mux pcie0_p
- },
- };
-
-+static struct clk_branch gcc_pcie0_pipe_clk = {
-+ .halt_reg = 0x28044,
-+ .halt_check = BRANCH_HALT_DELAY,
-+ .clkr = {
-+ .enable_reg = 0x28044,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "gcc_pcie0_pipe_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &pcie0_pipe_clk_src.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
- static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
- .reg = 0x29064,
- .clkr = {
-@@ -1583,6 +1601,24 @@ static struct clk_regmap_phy_mux pcie1_p
- },
- };
-
-+static struct clk_branch gcc_pcie1_pipe_clk = {
-+ .halt_reg = 0x29044,
-+ .halt_check = BRANCH_HALT_DELAY,
-+ .clkr = {
-+ .enable_reg = 0x29044,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "gcc_pcie1_pipe_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &pcie1_pipe_clk_src.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
- static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
- .reg = 0x2a064,
- .clkr = {
-@@ -1597,6 +1633,24 @@ static struct clk_regmap_phy_mux pcie2_p
- },
- };
-
-+static struct clk_branch gcc_pcie2_pipe_clk = {
-+ .halt_reg = 0x2a044,
-+ .halt_check = BRANCH_HALT_DELAY,
-+ .clkr = {
-+ .enable_reg = 0x2a044,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "gcc_pcie2_pipe_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &pcie2_pipe_clk_src.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
- static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
- .reg = 0x2b064,
- .clkr = {
-@@ -1611,6 +1665,24 @@ static struct clk_regmap_phy_mux pcie3_p
- },
- };
-
-+static struct clk_branch gcc_pcie3_pipe_clk = {
-+ .halt_reg = 0x2b044,
-+ .halt_check = BRANCH_HALT_DELAY,
-+ .clkr = {
-+ .enable_reg = 0x2b044,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "gcc_pcie3_pipe_clk",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &pcie3_pipe_clk_src.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
- static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(100000000, P_GPLL0, 8, 0, 0),
-@@ -4143,6 +4215,10 @@ static struct clk_regmap *gcc_ipq9574_cl
- [GCC_SNOC_PCIE1_1LANE_S_CLK] = &gcc_snoc_pcie1_1lane_s_clk.clkr,
- [GCC_SNOC_PCIE2_2LANE_S_CLK] = &gcc_snoc_pcie2_2lane_s_clk.clkr,
- [GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
-+ [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
-+ [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
-+ [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
-+ [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
- };
-
- static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+++ /dev/null
-From ef3308cf52553522d619a858a72a68f82432865b Mon Sep 17 00:00:00 2001
-From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Date: Wed, 29 May 2024 17:47:10 +0300
-Subject: [PATCH] arm64: dts: qcom: ipq9574: drop #power-domain-cells property
- of GCC
-
-On IPQ9574 the Global Clock Controller (GCC) doesn't provide power
-domains. Drop the #power-domain-cells property from the controller
-device node.
-
-Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Link: https://lore.kernel.org/r/20240529-qcom-gdscs-v2-12-69c63d0ae1e7@linaro.org
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -296,7 +296,6 @@
- <0>;
- #clock-cells = <1>;
- #reset-cells = <1>;
-- #power-domain-cells = <1>;
- };
-
- tcsr_mutex: hwlock@1905000 {
+++ /dev/null
-From f45b94ffc5f1204b35b5c695ed265b1385951616 Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:09 +0530
-Subject: [PATCH] interconnect: icc-clk: Specify master/slave ids
-
-Presently, icc-clk driver autogenerates the master and slave ids.
-However, devices with multiple nodes on the interconnect could
-have other constraints and may not match with the auto generated
-node ids.
-
-Hence, modify the driver to use the master/slave ids provided by
-the caller instead of auto generating.
-
-Also, update clk-cbf-8996 accordingly.
-
-Acked-by: Georgi Djakov <djakov@kernel.org>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/20240430064214.2030013-2-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/clk-cbf-8996.c | 7 ++++++-
- drivers/interconnect/icc-clk.c | 6 +++---
- include/linux/interconnect-clk.h | 2 ++
- 3 files changed, 11 insertions(+), 4 deletions(-)
-
---- a/drivers/clk/qcom/clk-cbf-8996.c
-+++ b/drivers/clk/qcom/clk-cbf-8996.c
-@@ -237,7 +237,12 @@ static int qcom_msm8996_cbf_icc_register
- struct device *dev = &pdev->dev;
- struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
- const struct icc_clk_data data[] = {
-- { .clk = clk, .name = "cbf", },
-+ {
-+ .clk = clk,
-+ .name = "cbf",
-+ .master_id = MASTER_CBF_M4M,
-+ .slave_id = SLAVE_CBF_M4M,
-+ },
- };
- struct icc_provider *provider;
-
---- a/drivers/interconnect/icc-clk.c
-+++ b/drivers/interconnect/icc-clk.c
-@@ -109,7 +109,7 @@ struct icc_provider *icc_clk_register(st
- for (i = 0, j = 0; i < num_clocks; i++) {
- qp->clocks[i].clk = data[i].clk;
-
-- node = icc_node_create(first_id + j);
-+ node = icc_node_create(first_id + data[i].master_id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
-@@ -119,10 +119,10 @@ struct icc_provider *icc_clk_register(st
- node->data = &qp->clocks[i];
- icc_node_add(node, provider);
- /* link to the next node, slave */
-- icc_link_create(node, first_id + j + 1);
-+ icc_link_create(node, first_id + data[i].slave_id);
- onecell->nodes[j++] = node;
-
-- node = icc_node_create(first_id + j);
-+ node = icc_node_create(first_id + data[i].slave_id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
---- a/include/linux/interconnect-clk.h
-+++ b/include/linux/interconnect-clk.h
-@@ -11,6 +11,8 @@ struct device;
- struct icc_clk_data {
- struct clk *clk;
- const char *name;
-+ unsigned int master_id;
-+ unsigned int slave_id;
- };
-
- struct icc_provider *icc_clk_register(struct device *dev,
+++ /dev/null
-From d1f1570f3d6db5d35642092a671812e62bfba79d Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:10 +0530
-Subject: [PATCH] dt-bindings: interconnect: Add Qualcomm IPQ9574 support
-
-Add interconnect-cells to clock provider so that it can be
-used as icc provider.
-
-Add master/slave ids for Qualcomm IPQ9574 Network-On-Chip
-interfaces. This will be used by the gcc-ipq9574 driver
-that will for providing interconnect services using the
-icc-clk framework.
-
-Acked-by: Georgi Djakov <djakov@kernel.org>
-Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/20240430064214.2030013-3-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- .../bindings/clock/qcom,ipq9574-gcc.yaml | 3 +
- .../dt-bindings/interconnect/qcom,ipq9574.h | 59 +++++++++++++++++++
- 2 files changed, 62 insertions(+)
- create mode 100644 include/dt-bindings/interconnect/qcom,ipq9574.h
-
---- a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
-+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
-@@ -33,6 +33,9 @@ properties:
- - description: PCIE30 PHY3 pipe clock source
- - description: USB3 PHY pipe clock source
-
-+ '#interconnect-cells':
-+ const: 1
-+
- required:
- - compatible
- - clocks
---- /dev/null
-+++ b/include/dt-bindings/interconnect/qcom,ipq9574.h
-@@ -0,0 +1,59 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+#ifndef INTERCONNECT_QCOM_IPQ9574_H
-+#define INTERCONNECT_QCOM_IPQ9574_H
-+
-+#define MASTER_ANOC_PCIE0 0
-+#define SLAVE_ANOC_PCIE0 1
-+#define MASTER_SNOC_PCIE0 2
-+#define SLAVE_SNOC_PCIE0 3
-+#define MASTER_ANOC_PCIE1 4
-+#define SLAVE_ANOC_PCIE1 5
-+#define MASTER_SNOC_PCIE1 6
-+#define SLAVE_SNOC_PCIE1 7
-+#define MASTER_ANOC_PCIE2 8
-+#define SLAVE_ANOC_PCIE2 9
-+#define MASTER_SNOC_PCIE2 10
-+#define SLAVE_SNOC_PCIE2 11
-+#define MASTER_ANOC_PCIE3 12
-+#define SLAVE_ANOC_PCIE3 13
-+#define MASTER_SNOC_PCIE3 14
-+#define SLAVE_SNOC_PCIE3 15
-+#define MASTER_USB 16
-+#define SLAVE_USB 17
-+#define MASTER_USB_AXI 18
-+#define SLAVE_USB_AXI 19
-+#define MASTER_NSSNOC_NSSCC 20
-+#define SLAVE_NSSNOC_NSSCC 21
-+#define MASTER_NSSNOC_SNOC_0 22
-+#define SLAVE_NSSNOC_SNOC_0 23
-+#define MASTER_NSSNOC_SNOC_1 24
-+#define SLAVE_NSSNOC_SNOC_1 25
-+#define MASTER_NSSNOC_PCNOC_1 26
-+#define SLAVE_NSSNOC_PCNOC_1 27
-+#define MASTER_NSSNOC_QOSGEN_REF 28
-+#define SLAVE_NSSNOC_QOSGEN_REF 29
-+#define MASTER_NSSNOC_TIMEOUT_REF 30
-+#define SLAVE_NSSNOC_TIMEOUT_REF 31
-+#define MASTER_NSSNOC_XO_DCD 32
-+#define SLAVE_NSSNOC_XO_DCD 33
-+#define MASTER_NSSNOC_ATB 34
-+#define SLAVE_NSSNOC_ATB 35
-+#define MASTER_MEM_NOC_NSSNOC 36
-+#define SLAVE_MEM_NOC_NSSNOC 37
-+#define MASTER_NSSNOC_MEMNOC 38
-+#define SLAVE_NSSNOC_MEMNOC 39
-+#define MASTER_NSSNOC_MEM_NOC_1 40
-+#define SLAVE_NSSNOC_MEM_NOC_1 41
-+
-+#define MASTER_NSSNOC_PPE 0
-+#define SLAVE_NSSNOC_PPE 1
-+#define MASTER_NSSNOC_PPE_CFG 2
-+#define SLAVE_NSSNOC_PPE_CFG 3
-+#define MASTER_NSSNOC_NSS_CSR 4
-+#define SLAVE_NSSNOC_NSS_CSR 5
-+#define MASTER_NSSNOC_IMEM_QSB 6
-+#define SLAVE_NSSNOC_IMEM_QSB 7
-+#define MASTER_NSSNOC_IMEM_AHB 8
-+#define SLAVE_NSSNOC_IMEM_AHB 9
-+
-+#endif /* INTERCONNECT_QCOM_IPQ9574_H */
+++ /dev/null
-From d3153113619216e87038a20bebf82582f9be10e7 Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:11 +0530
-Subject: [PATCH] interconnect: icc-clk: Add devm_icc_clk_register
-
-Wrap icc_clk_register to create devm_icc_clk_register to be
-able to release the resources properly.
-
-Acked-by: Georgi Djakov <djakov@kernel.org>
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/20240430064214.2030013-4-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/interconnect/icc-clk.c | 18 ++++++++++++++++++
- include/linux/interconnect-clk.h | 2 ++
- 2 files changed, 20 insertions(+)
-
---- a/drivers/interconnect/icc-clk.c
-+++ b/drivers/interconnect/icc-clk.c
-@@ -147,6 +147,24 @@ err:
- }
- EXPORT_SYMBOL_GPL(icc_clk_register);
-
-+static void devm_icc_release(void *res)
-+{
-+ icc_clk_unregister(res);
-+}
-+
-+int devm_icc_clk_register(struct device *dev, unsigned int first_id,
-+ unsigned int num_clocks, const struct icc_clk_data *data)
-+{
-+ struct icc_provider *prov;
-+
-+ prov = icc_clk_register(dev, first_id, num_clocks, data);
-+ if (IS_ERR(prov))
-+ return PTR_ERR(prov);
-+
-+ return devm_add_action_or_reset(dev, devm_icc_release, prov);
-+}
-+EXPORT_SYMBOL_GPL(devm_icc_clk_register);
-+
- /**
- * icc_clk_unregister() - unregister a previously registered clk interconnect provider
- * @provider: provider returned by icc_clk_register()
---- a/include/linux/interconnect-clk.h
-+++ b/include/linux/interconnect-clk.h
-@@ -19,6 +19,8 @@ struct icc_provider *icc_clk_register(st
- unsigned int first_id,
- unsigned int num_clocks,
- const struct icc_clk_data *data);
-+int devm_icc_clk_register(struct device *dev, unsigned int first_id,
-+ unsigned int num_clocks, const struct icc_clk_data *data);
- void icc_clk_unregister(struct icc_provider *provider);
-
- #endif
+++ /dev/null
-From 8737ec830ee32162858af7c1504169b05b313ab1 Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:12 +0530
-Subject: [PATCH] clk: qcom: common: Add interconnect clocks support
-
-Unlike MSM platforms that manage NoC related clocks and scaling
-from RPM, IPQ SoCs dont involve RPM in managing NoC related
-clocks and there is no NoC scaling.
-
-However, there is a requirement to enable some NoC interface
-clocks for accessing the peripheral controllers present on
-these NoCs. Though exposing these as normal clocks would work,
-having a minimalistic interconnect driver to handle these clocks
-would make it consistent with other Qualcomm platforms resulting
-in common code paths. This is similar to msm8996-cbf's usage of
-icc-clk framework.
-
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/20240430064214.2030013-5-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/common.c | 35 ++++++++++++++++++++++++++++++++++-
- drivers/clk/qcom/common.h | 9 +++++++++
- 2 files changed, 43 insertions(+), 1 deletion(-)
-
---- a/drivers/clk/qcom/common.c
-+++ b/drivers/clk/qcom/common.c
-@@ -8,6 +8,7 @@
- #include <linux/regmap.h>
- #include <linux/platform_device.h>
- #include <linux/clk-provider.h>
-+#include <linux/interconnect-clk.h>
- #include <linux/reset-controller.h>
- #include <linux/of.h>
-
-@@ -250,6 +251,38 @@ static struct clk_hw *qcom_cc_clk_hw_get
- return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
- }
-
-+static int qcom_cc_icc_register(struct device *dev,
-+ const struct qcom_cc_desc *desc)
-+{
-+ struct icc_clk_data *icd;
-+ struct clk_hw *hws;
-+ int i;
-+
-+ if (!IS_ENABLED(CONFIG_INTERCONNECT_CLK))
-+ return 0;
-+
-+ if (!desc->icc_hws)
-+ return 0;
-+
-+ icd = devm_kcalloc(dev, desc->num_icc_hws, sizeof(*icd), GFP_KERNEL);
-+ if (!icd)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < desc->num_icc_hws; i++) {
-+ icd[i].master_id = desc->icc_hws[i].master_id;
-+ icd[i].slave_id = desc->icc_hws[i].slave_id;
-+ hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
-+ icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
-+ if (!icd[i].clk)
-+ return dev_err_probe(dev, -ENOENT,
-+ "(%d) clock entry is null\n", i);
-+ icd[i].name = clk_hw_get_name(hws);
-+ }
-+
-+ return devm_icc_clk_register(dev, desc->icc_first_node_id,
-+ desc->num_icc_hws, icd);
-+}
-+
- int qcom_cc_really_probe(struct device *dev,
- const struct qcom_cc_desc *desc, struct regmap *regmap)
- {
-@@ -318,7 +351,7 @@ int qcom_cc_really_probe(struct device *
- if (ret)
- return ret;
-
-- return 0;
-+ return qcom_cc_icc_register(dev, desc);
- }
- EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
-
---- a/drivers/clk/qcom/common.h
-+++ b/drivers/clk/qcom/common.h
-@@ -19,6 +19,12 @@ struct clk_hw;
- #define PLL_VOTE_FSM_ENA BIT(20)
- #define PLL_VOTE_FSM_RESET BIT(21)
-
-+struct qcom_icc_hws_data {
-+ int master_id;
-+ int slave_id;
-+ int clk_id;
-+};
-+
- struct qcom_cc_desc {
- const struct regmap_config *config;
- struct clk_regmap **clks;
-@@ -29,6 +35,9 @@ struct qcom_cc_desc {
- size_t num_gdscs;
- struct clk_hw **clk_hws;
- size_t num_clk_hws;
-+ struct qcom_icc_hws_data *icc_hws;
-+ size_t num_icc_hws;
-+ unsigned int icc_first_node_id;
- };
-
- /**
+++ /dev/null
-From 23711cabe122ef55bcb2e5c3e3835b5a2a688fc0 Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:13 +0530
-Subject: [PATCH] clk: qcom: ipq9574: Use icc-clk for enabling NoC related
- clocks
-
-Use the icc-clk framework to enable few clocks to be able to
-create paths and use the peripherals connected on those NoCs.
-
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
-Link: https://lore.kernel.org/r/20240430064214.2030013-6-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/Kconfig | 2 ++
- drivers/clk/qcom/gcc-ipq9574.c | 33 +++++++++++++++++++++++++++++++++
- 2 files changed, 35 insertions(+)
-
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -17,6 +17,8 @@ menuconfig COMMON_CLK_QCOM
- select RATIONAL
- select REGMAP_MMIO
- select RESET_CONTROLLER
-+ select INTERCONNECT
-+ select INTERCONNECT_CLK
-
- if COMMON_CLK_QCOM
-
---- a/drivers/clk/qcom/gcc-ipq9574.c
-+++ b/drivers/clk/qcom/gcc-ipq9574.c
-@@ -4,6 +4,8 @@
- */
-
- #include <linux/clk-provider.h>
-+#include <linux/interconnect-clk.h>
-+#include <linux/interconnect-provider.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/of.h>
-@@ -12,6 +14,7 @@
-
- #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
- #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
-+#include <dt-bindings/interconnect/qcom,ipq9574.h>
-
- #include "clk-alpha-pll.h"
- #include "clk-branch.h"
-@@ -4379,6 +4382,32 @@ static const struct qcom_reset_map gcc_i
- [GCC_WCSS_Q6_TBU_BCR] = { 0x12054, 0 },
- };
-
-+#define IPQ_APPS_ID 9574 /* some unique value */
-+
-+static struct qcom_icc_hws_data icc_ipq9574_hws[] = {
-+ { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
-+ { MASTER_SNOC_PCIE0, SLAVE_SNOC_PCIE0, GCC_SNOC_PCIE0_1LANE_S_CLK },
-+ { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
-+ { MASTER_SNOC_PCIE1, SLAVE_SNOC_PCIE1, GCC_SNOC_PCIE1_1LANE_S_CLK },
-+ { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
-+ { MASTER_SNOC_PCIE2, SLAVE_SNOC_PCIE2, GCC_SNOC_PCIE2_2LANE_S_CLK },
-+ { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
-+ { MASTER_SNOC_PCIE3, SLAVE_SNOC_PCIE3, GCC_SNOC_PCIE3_2LANE_S_CLK },
-+ { MASTER_USB, SLAVE_USB, GCC_SNOC_USB_CLK },
-+ { MASTER_USB_AXI, SLAVE_USB_AXI, GCC_ANOC_USB_AXI_CLK },
-+ { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
-+ { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
-+ { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
-+ { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
-+ { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
-+ { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
-+ { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
-+ { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
-+ { MASTER_MEM_NOC_NSSNOC, SLAVE_MEM_NOC_NSSNOC, GCC_MEM_NOC_NSSNOC_CLK },
-+ { MASTER_NSSNOC_MEMNOC, SLAVE_NSSNOC_MEMNOC, GCC_NSSNOC_MEMNOC_CLK },
-+ { MASTER_NSSNOC_MEM_NOC_1, SLAVE_NSSNOC_MEM_NOC_1, GCC_NSSNOC_MEM_NOC_1_CLK },
-+};
-+
- static const struct of_device_id gcc_ipq9574_match_table[] = {
- { .compatible = "qcom,ipq9574-gcc" },
- { }
-@@ -4401,6 +4430,9 @@ static const struct qcom_cc_desc gcc_ipq
- .num_resets = ARRAY_SIZE(gcc_ipq9574_resets),
- .clk_hws = gcc_ipq9574_hws,
- .num_clk_hws = ARRAY_SIZE(gcc_ipq9574_hws),
-+ .icc_hws = icc_ipq9574_hws,
-+ .num_icc_hws = ARRAY_SIZE(icc_ipq9574_hws),
-+ .icc_first_node_id = IPQ_APPS_ID,
- };
-
- static int gcc_ipq9574_probe(struct platform_device *pdev)
-@@ -4413,6 +4445,7 @@ static struct platform_driver gcc_ipq957
- .driver = {
- .name = "qcom,gcc-ipq9574",
- .of_match_table = gcc_ipq9574_match_table,
-+ .sync_state = icc_sync_state,
- },
- };
-
+++ /dev/null
-From 5d0ab61a700214366dfcca5893b87655261e8c94 Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 30 Apr 2024 12:12:14 +0530
-Subject: [PATCH] arm64: dts: qcom: ipq9574: Add icc provider ability to gcc
-
-IPQ SoCs dont involve RPM in managing NoC related clocks and
-there is no NoC scaling. Linux itself handles these clocks.
-However, these should not be exposed as just clocks and align
-with other Qualcomm SoCs that handle these clocks from a
-interconnect provider.
-
-Hence include icc provider capability to the gcc node so that
-peripherals can use the interconnect facility to enable these
-clocks.
-
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/20240430064214.2030013-7-quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -8,6 +8,7 @@
-
- #include <dt-bindings/clock/qcom,apss-ipq.h>
- #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
-+#include <dt-bindings/interconnect/qcom,ipq9574.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
- #include <dt-bindings/thermal/thermal.h>
-@@ -296,6 +297,7 @@
- <0>;
- #clock-cells = <1>;
- #reset-cells = <1>;
-+ #interconnect-cells = <1>;
- };
-
- tcsr_mutex: hwlock@1905000 {
+++ /dev/null
-From ba5a61a08d83b18b99c461b4ddb9009947a4aa0e Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 31 Oct 2023 12:41:38 +0530
-Subject: [PATCH 1/2] cpufreq: qcom-nvmem: Enable cpufreq for ipq53xx
-
-IPQ53xx have different OPPs available for the CPU based on
-SoC variant. This can be determined through use of an eFuse
-register present in the silicon.
-
-Added support for ipq53xx on nvmem driver which helps to
-determine OPPs at runtime based on the eFuse register which
-has the CPU frequency limits. opp-supported-hw dt binding
-can be used to indicate the available OPPs for each limit.
-
-nvmem driver also creates the "cpufreq-dt" platform_device after
-passing the version matching data to the OPP framework so that the
-cpufreq-dt handles the actual cpufreq implementation.
-
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
-Signed-off-by: Kathiravan T <quic_kathirav@quicinc.com>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-[ Viresh: Fixed subject ]
-Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
----
- drivers/cpufreq/cpufreq-dt-platdev.c | 1 +
- drivers/cpufreq/qcom-cpufreq-nvmem.c | 6 ++++++
- 2 files changed, 7 insertions(+)
-
---- a/drivers/cpufreq/cpufreq-dt-platdev.c
-+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
-@@ -177,6 +177,7 @@ static const struct of_device_id blockli
- { .compatible = "ti,am625", },
- { .compatible = "ti,am62a7", },
-
-+ { .compatible = "qcom,ipq5332", },
- { .compatible = "qcom,ipq8064", },
- { .compatible = "qcom,apq8064", },
- { .compatible = "qcom,msm8974", },
---- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
-+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
-@@ -152,6 +152,11 @@ static int qcom_cpufreq_kryo_name_versio
- switch (msm_id) {
- case QCOM_ID_MSM8996:
- case QCOM_ID_APQ8096:
-+ case QCOM_ID_IPQ5332:
-+ case QCOM_ID_IPQ5322:
-+ case QCOM_ID_IPQ5312:
-+ case QCOM_ID_IPQ5302:
-+ case QCOM_ID_IPQ5300:
- drv->versions = 1 << (unsigned int)(*speedbin);
- break;
- case QCOM_ID_MSM8996SG:
-@@ -353,6 +358,7 @@ static const struct of_device_id qcom_cp
- { .compatible = "qcom,apq8096", .data = &match_data_kryo },
- { .compatible = "qcom,msm8996", .data = &match_data_kryo },
- { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
-+ { .compatible = "qcom,ipq5332", .data = &match_data_kryo },
- { .compatible = "qcom,ipq8064", .data = &match_data_krait },
- { .compatible = "qcom,apq8064", .data = &match_data_krait },
- { .compatible = "qcom,msm8974", .data = &match_data_krait },
+++ /dev/null
-From 5b5b5806f22390808b8e8fa180fe35b003a4a74d Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Tue, 31 Oct 2023 12:41:39 +0530
-Subject: [PATCH 2/2] cpufreq: qcom-nvmem: Introduce cpufreq for ipq95xx
-
-IPQ95xx SoCs have different OPPs available for the CPU based on
-the SoC variant. This can be determined from an eFuse register
-present in the silicon.
-
-Added support for ipq95xx on nvmem driver which helps to
-determine OPPs at runtime based on the eFuse register which
-has the CPU frequency limits. opp-supported-hw dt binding
-can be used to indicate the available OPPs for each limit.
-
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Praveenkumar I <ipkumar@codeaurora.org>
-Signed-off-by: Kathiravan T <quic_kathirav@quicinc.com>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-[ Viresh: Fixed subject ]
-Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
----
- drivers/cpufreq/cpufreq-dt-platdev.c | 1 +
- drivers/cpufreq/qcom-cpufreq-nvmem.c | 6 ++++++
- 2 files changed, 7 insertions(+)
-
---- a/drivers/cpufreq/cpufreq-dt-platdev.c
-+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
-@@ -179,6 +179,7 @@ static const struct of_device_id blockli
-
- { .compatible = "qcom,ipq5332", },
- { .compatible = "qcom,ipq8064", },
-+ { .compatible = "qcom,ipq9574", },
- { .compatible = "qcom,apq8064", },
- { .compatible = "qcom,msm8974", },
- { .compatible = "qcom,msm8960", },
---- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
-+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
-@@ -157,6 +157,11 @@ static int qcom_cpufreq_kryo_name_versio
- case QCOM_ID_IPQ5312:
- case QCOM_ID_IPQ5302:
- case QCOM_ID_IPQ5300:
-+ case QCOM_ID_IPQ9514:
-+ case QCOM_ID_IPQ9550:
-+ case QCOM_ID_IPQ9554:
-+ case QCOM_ID_IPQ9570:
-+ case QCOM_ID_IPQ9574:
- drv->versions = 1 << (unsigned int)(*speedbin);
- break;
- case QCOM_ID_MSM8996SG:
-@@ -361,6 +366,7 @@ static const struct of_device_id qcom_cp
- { .compatible = "qcom,ipq5332", .data = &match_data_kryo },
- { .compatible = "qcom,ipq8064", .data = &match_data_krait },
- { .compatible = "qcom,apq8064", .data = &match_data_krait },
-+ { .compatible = "qcom,ipq9574", .data = &match_data_kryo },
- { .compatible = "qcom,msm8974", .data = &match_data_krait },
- { .compatible = "qcom,msm8960", .data = &match_data_krait },
- {},
+++ /dev/null
-From b36074357baf2794c825ea1c145de1d22b15380b Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Fri, 20 Oct 2023 11:49:39 +0530
-Subject: [PATCH] arm64: dts: qcom: ipq9574: populate the opp table based on
- the eFuse
-
-IPQ95xx SoCs have different OPPs available for the CPU based on
-SoC variant. This can be determined from an eFuse register
-present in the silicon.
-
-Add support to read the eFuse and populate the OPPs based on it.
-
-Frequency 1.2GHz 1.8GHz 1.5GHz No opp-supported-hw
- Limit
-------------------------------------------------------------
-936000000 1 1 1 1 0xf
-1104000000 1 1 1 1 0xf
-1200000000 1 1 1 1 0xf
-1416000000 0 1 1 1 0x7
-1488000000 0 1 1 1 0x7
-1800000000 0 1 0 1 0x5
-2208000000 0 0 0 1 0x1
------------------------------------------------------------
-
-Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
-Signed-off-by: Kathiravan T <quic_kathirav@quicinc.com>
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Link: https://lore.kernel.org/r/14ab08b7cfd904433ca6065fac798d4f221c9d95.1697781921.git.quic_varada@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 21 ++++++++++++++++++++-
- 1 file changed, 20 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -107,42 +107,56 @@
- };
-
- cpu_opp_table: opp-table-cpu {
-- compatible = "operating-points-v2";
-+ compatible = "operating-points-v2-kryo-cpu";
- opp-shared;
-+ nvmem-cells = <&cpu_speed_bin>;
-
- opp-936000000 {
- opp-hz = /bits/ 64 <936000000>;
- opp-microvolt = <725000>;
-+ opp-supported-hw = <0xf>;
- clock-latency-ns = <200000>;
- };
-
- opp-1104000000 {
- opp-hz = /bits/ 64 <1104000000>;
- opp-microvolt = <787500>;
-+ opp-supported-hw = <0xf>;
-+ clock-latency-ns = <200000>;
-+ };
-+
-+ opp-1200000000 {
-+ opp-hz = /bits/ 64 <1200000000>;
-+ opp-microvolt = <862500>;
-+ opp-supported-hw = <0xf>;
- clock-latency-ns = <200000>;
- };
-
- opp-1416000000 {
- opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <862500>;
-+ opp-supported-hw = <0x7>;
- clock-latency-ns = <200000>;
- };
-
- opp-1488000000 {
- opp-hz = /bits/ 64 <1488000000>;
- opp-microvolt = <925000>;
-+ opp-supported-hw = <0x7>;
- clock-latency-ns = <200000>;
- };
-
- opp-1800000000 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <987500>;
-+ opp-supported-hw = <0x5>;
- clock-latency-ns = <200000>;
- };
-
- opp-2208000000 {
- opp-hz = /bits/ 64 <2208000000>;
- opp-microvolt = <1062500>;
-+ opp-supported-hw = <0x1>;
- clock-latency-ns = <200000>;
- };
- };
-@@ -234,6 +248,11 @@
- reg = <0x000a4000 0x5a1>;
- #address-cells = <1>;
- #size-cells = <1>;
-+
-+ cpu_speed_bin: cpu-speed-bin@15 {
-+ reg = <0x15 0x2>;
-+ bits = <7 2>;
-+ };
- };
-
- cryptobam: dma-controller@704000 {
+++ /dev/null
-From ad663ce6780477177e301756ade6cf236f36ae4c Mon Sep 17 00:00:00 2001
-From: Varadarajan Narayanan <quic_varada@quicinc.com>
-Date: Thu, 14 Dec 2023 16:10:52 +0530
-Subject: [PATCH] regulator: qcom_smd: Add LDO5 MP5496 regulator
-
-Add support for LDO5 regulator. This is used by IPQ9574 USB.
-
-Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
-Rule: <add>
-Link: https://lore.kernel.org/stable/20231214104052.3267039-1-quic_varada%40quicinc.com
-Link: https://msgid.link/r/20231214104052.3267039-1-quic_varada@quicinc.com
-Signed-off-by: Mark Brown <broonie@kernel.org>
----
- drivers/regulator/qcom_smd-regulator.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/regulator/qcom_smd-regulator.c
-+++ b/drivers/regulator/qcom_smd-regulator.c
-@@ -796,6 +796,7 @@ static const struct rpm_regulator_data r
- { "s1", QCOM_SMD_RPM_SMPA, 1, &mp5496_smps, "s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smps, "s2" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" },
-+ { "l5", QCOM_SMD_RPM_LDOA, 5, &mp5496_ldoa2, "l5" },
- {}
- };
-
+++ /dev/null
-From f81715a4c87c3b75ca2640bb61b6c66506061a64 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Fri, 3 Jan 2025 15:31:35 +0800
-Subject: [PATCH] clk: qcom: Add CMN PLL clock controller driver for IPQ SoC
-
-The CMN PLL clock controller supplies clocks to the hardware
-blocks that together make up the Ethernet function on Qualcomm
-IPQ SoCs and to GCC. The driver is initially supported for
-IPQ9574 SoC.
-
-The CMN PLL clock controller expects a reference input clock
-from the on-board Wi-Fi block acting as clock source. The input
-reference clock needs to be configured to one of the supported
-clock rates.
-
-The controller supplies a number of fixed-rate output clocks.
-For the IPQ9574, there is one output clock of 353 MHZ to PPE
-(Packet Process Engine) hardware block, three 50 MHZ output
-clocks and an additional 25 MHZ output clock supplied to the
-connected Ethernet devices. The PLL also supplies a 24 MHZ
-clock as XO and a 32 KHZ sleep clock to GCC, and one 31.25
-MHZ clock to PCS.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Acked-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
-Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-2-c89fb4d4849d@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/clk/qcom/Kconfig | 9 +
- drivers/clk/qcom/Makefile | 1 +
- drivers/clk/qcom/ipq-cmn-pll.c | 435 +++++++++++++++++++++++++++++++++
- 3 files changed, 445 insertions(+)
- create mode 100644 drivers/clk/qcom/ipq-cmn-pll.c
-
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -141,6 +141,15 @@ config IPQ_APSS_6018
- Say Y if you want to support CPU frequency scaling on
- ipq based devices.
-
-+config IPQ_CMN_PLL
-+ tristate "IPQ CMN PLL Clock Controller"
-+ help
-+ Support for CMN PLL clock controller on IPQ platform. The
-+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
-+ the output clocks to the networking hardware and GCC blocks.
-+ Say Y or M if you want to support CMN PLL clock on the IPQ
-+ based devices.
-+
- config IPQ_GCC_4019
- tristate "IPQ4019 Global Clock Controller"
- help
---- a/drivers/clk/qcom/Makefile
-+++ b/drivers/clk/qcom/Makefile
-@@ -23,6 +23,7 @@ obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8
- obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
- obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
- obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
-+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
- obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
- obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
- obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
---- /dev/null
-+++ b/drivers/clk/qcom/ipq-cmn-pll.c
-@@ -0,0 +1,435 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/*
-+ * CMN PLL block expects the reference clock from on-board Wi-Fi block,
-+ * and supplies fixed rate clocks as output to the networking hardware
-+ * blocks and to GCC. The networking related blocks include PPE (packet
-+ * process engine), the externally connected PHY or switch devices, and
-+ * the PCS.
-+ *
-+ * On the IPQ9574 SoC, there are three clocks with 50 MHZ and one clock
-+ * with 25 MHZ which are output from the CMN PLL to Ethernet PHY (or switch),
-+ * and one clock with 353 MHZ to PPE. The other fixed rate output clocks
-+ * are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
-+ * with 31.25 MHZ.
-+ *
-+ * +---------+
-+ * | GCC |
-+ * +--+---+--+
-+ * AHB CLK| |SYS CLK
-+ * V V
-+ * +-------+---+------+
-+ * | +-------------> eth0-50mhz
-+ * REF CLK | IPQ9574 |
-+ * -------->+ +-------------> eth1-50mhz
-+ * | CMN PLL block |
-+ * | +-------------> eth2-50mhz
-+ * | |
-+ * +----+----+----+---+-------------> eth-25mhz
-+ * | | |
-+ * V V V
-+ * GCC PCS NSS/PPE
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/clk-provider.h>
-+#include <linux/delay.h>
-+#include <linux/err.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_clock.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/regmap.h>
-+
-+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
-+
-+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
-+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
-+
-+#define CMN_PLL_LOCKED 0x64
-+#define CMN_PLL_CLKS_LOCKED BIT(8)
-+
-+#define CMN_PLL_POWER_ON_AND_RESET 0x780
-+#define CMN_ANA_EN_SW_RSTN BIT(6)
-+
-+#define CMN_PLL_REFCLK_CONFIG 0x784
-+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
-+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
-+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
-+
-+#define CMN_PLL_CTRL 0x78c
-+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
-+
-+#define CMN_PLL_DIVIDER_CTRL 0x794
-+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
-+
-+/**
-+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
-+ * @id: Clock specifier to be supplied
-+ * @name: Clock name to be registered
-+ * @rate: Clock rate
-+ */
-+struct cmn_pll_fixed_output_clk {
-+ unsigned int id;
-+ const char *name;
-+ unsigned long rate;
-+};
-+
-+/**
-+ * struct clk_cmn_pll - CMN PLL hardware specific data
-+ * @regmap: hardware regmap.
-+ * @hw: handle between common and hardware-specific interfaces
-+ */
-+struct clk_cmn_pll {
-+ struct regmap *regmap;
-+ struct clk_hw hw;
-+};
-+
-+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
-+ .id = _id, \
-+ .name = _name, \
-+ .rate = _rate, \
-+}
-+
-+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
-+
-+static const struct regmap_config ipq_cmn_pll_regmap_config = {
-+ .reg_bits = 32,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .max_register = 0x7fc,
-+ .fast_io = true,
-+};
-+
-+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
-+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
-+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
-+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
-+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
-+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
-+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
-+};
-+
-+/*
-+ * CMN PLL has the single parent clock, which supports the several
-+ * possible parent clock rates, each parent clock rate is reflected
-+ * by the specific reference index value in the hardware.
-+ */
-+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
-+{
-+ int index = -EINVAL;
-+
-+ switch (parent_rate) {
-+ case 25000000:
-+ index = 3;
-+ break;
-+ case 31250000:
-+ index = 4;
-+ break;
-+ case 40000000:
-+ index = 6;
-+ break;
-+ case 48000000:
-+ case 96000000:
-+ /*
-+ * Parent clock rate 48 MHZ and 96 MHZ take the same value
-+ * of reference clock index. 96 MHZ needs the source clock
-+ * divider to be programmed as 2.
-+ */
-+ index = 7;
-+ break;
-+ case 50000000:
-+ index = 8;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return index;
-+}
-+
-+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
-+ unsigned long parent_rate)
-+{
-+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
-+ u32 val, factor;
-+
-+ /*
-+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
-+ * by HW according to the parent clock rate.
-+ */
-+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
-+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
-+
-+ return parent_rate * 2 * factor;
-+}
-+
-+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
-+ struct clk_rate_request *req)
-+{
-+ int ret;
-+
-+ /* Validate the rate of the single parent clock. */
-+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
-+
-+ return ret < 0 ? ret : 0;
-+}
-+
-+/*
-+ * This function is used to initialize the CMN PLL to enable the fixed
-+ * rate output clocks. It is expected to be configured once.
-+ */
-+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-+ unsigned long parent_rate)
-+{
-+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
-+ int ret, index;
-+ u32 val;
-+
-+ /*
-+ * Configure the reference input clock selection as per the given
-+ * parent clock. The output clock rates are always of fixed value.
-+ */
-+ index = ipq_cmn_pll_find_freq_index(parent_rate);
-+ if (index < 0)
-+ return index;
-+
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
-+ CMN_PLL_REFCLK_INDEX,
-+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Update the source clock rate selection and source clock
-+ * divider as 2 when the parent clock rate is 96 MHZ.
-+ */
-+ if (parent_rate == 96000000) {
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
-+ CMN_PLL_REFCLK_DIV,
-+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
-+ if (ret)
-+ return ret;
-+
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
-+ CMN_PLL_REFCLK_SRC_DIV,
-+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Enable PLL locked detect. */
-+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
-+ CMN_PLL_CTRL_LOCK_DETECT_EN);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Reset the CMN PLL block to ensure the updated configurations
-+ * take effect.
-+ */
-+ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
-+ CMN_ANA_EN_SW_RSTN);
-+ if (ret)
-+ return ret;
-+
-+ usleep_range(1000, 1200);
-+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
-+ CMN_ANA_EN_SW_RSTN);
-+ if (ret)
-+ return ret;
-+
-+ /* Stability check of CMN PLL output clocks. */
-+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
-+ (val & CMN_PLL_CLKS_LOCKED),
-+ 100, 100 * USEC_PER_MSEC);
-+}
-+
-+static const struct clk_ops clk_cmn_pll_ops = {
-+ .recalc_rate = clk_cmn_pll_recalc_rate,
-+ .determine_rate = clk_cmn_pll_determine_rate,
-+ .set_rate = clk_cmn_pll_set_rate,
-+};
-+
-+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
-+{
-+ struct clk_parent_data pdata = { .index = 0 };
-+ struct device *dev = &pdev->dev;
-+ struct clk_init_data init = {};
-+ struct clk_cmn_pll *cmn_pll;
-+ struct regmap *regmap;
-+ void __iomem *base;
-+ int ret;
-+
-+ base = devm_platform_ioremap_resource(pdev, 0);
-+ if (IS_ERR(base))
-+ return ERR_CAST(base);
-+
-+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
-+ if (IS_ERR(regmap))
-+ return ERR_CAST(regmap);
-+
-+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
-+ if (!cmn_pll)
-+ return ERR_PTR(-ENOMEM);
-+
-+ init.name = "cmn_pll";
-+ init.parent_data = &pdata;
-+ init.num_parents = 1;
-+ init.ops = &clk_cmn_pll_ops;
-+
-+ cmn_pll->hw.init = &init;
-+ cmn_pll->regmap = regmap;
-+
-+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
-+ if (ret)
-+ return ERR_PTR(ret);
-+
-+ return &cmn_pll->hw;
-+}
-+
-+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
-+{
-+ const struct cmn_pll_fixed_output_clk *fixed_clk;
-+ struct clk_hw_onecell_data *hw_data;
-+ struct device *dev = &pdev->dev;
-+ struct clk_hw *cmn_pll_hw;
-+ unsigned int num_clks;
-+ struct clk_hw *hw;
-+ int ret, i;
-+
-+ fixed_clk = ipq9574_output_clks;
-+ num_clks = ARRAY_SIZE(ipq9574_output_clks);
-+
-+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
-+ GFP_KERNEL);
-+ if (!hw_data)
-+ return -ENOMEM;
-+
-+ /*
-+ * Register the CMN PLL clock, which is the parent clock of
-+ * the fixed rate output clocks.
-+ */
-+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
-+ if (IS_ERR(cmn_pll_hw))
-+ return PTR_ERR(cmn_pll_hw);
-+
-+ /* Register the fixed rate output clocks. */
-+ for (i = 0; i < num_clks; i++) {
-+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
-+ cmn_pll_hw, 0,
-+ fixed_clk[i].rate);
-+ if (IS_ERR(hw)) {
-+ ret = PTR_ERR(hw);
-+ goto unregister_fixed_clk;
-+ }
-+
-+ hw_data->hws[fixed_clk[i].id] = hw;
-+ }
-+
-+ /*
-+ * Provide the CMN PLL clock. The clock rate of CMN PLL
-+ * is configured to 12 GHZ by DT property assigned-clock-rates-u64.
-+ */
-+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
-+ hw_data->num = num_clks + 1;
-+
-+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
-+ if (ret)
-+ goto unregister_fixed_clk;
-+
-+ platform_set_drvdata(pdev, hw_data);
-+
-+ return 0;
-+
-+unregister_fixed_clk:
-+ while (i > 0)
-+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
-+
-+ return ret;
-+}
-+
-+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ int ret;
-+
-+ ret = devm_pm_runtime_enable(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = devm_pm_clk_create(dev);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * To access the CMN PLL registers, the GCC AHB & SYS clocks
-+ * of CMN PLL block need to be enabled.
-+ */
-+ ret = pm_clk_add(dev, "ahb");
-+ if (ret)
-+ return dev_err_probe(dev, ret, "Fail to add AHB clock\n");
-+
-+ ret = pm_clk_add(dev, "sys");
-+ if (ret)
-+ return dev_err_probe(dev, ret, "Fail to add SYS clock\n");
-+
-+ ret = pm_runtime_resume_and_get(dev);
-+ if (ret)
-+ return ret;
-+
-+ /* Register CMN PLL clock and fixed rate output clocks. */
-+ ret = ipq_cmn_pll_register_clks(pdev);
-+ pm_runtime_put(dev);
-+ if (ret)
-+ return dev_err_probe(dev, ret,
-+ "Fail to register CMN PLL clocks\n");
-+
-+ return 0;
-+}
-+
-+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
-+{
-+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
-+ int i;
-+
-+ /*
-+ * The clock with index CMN_PLL_CLK is unregistered by
-+ * device management.
-+ */
-+ for (i = 0; i < hw_data->num; i++) {
-+ if (i != CMN_PLL_CLK)
-+ clk_hw_unregister(hw_data->hws[i]);
-+ }
-+}
-+
-+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
-+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
-+};
-+
-+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
-+ { .compatible = "qcom,ipq9574-cmn-pll", },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
-+
-+static struct platform_driver ipq_cmn_pll_clk_driver = {
-+ .probe = ipq_cmn_pll_clk_probe,
-+ .remove_new = ipq_cmn_pll_clk_remove,
-+ .driver = {
-+ .name = "ipq_cmn_pll",
-+ .of_match_table = ipq_cmn_pll_clk_ids,
-+ .pm = &ipq_cmn_pll_pm_ops,
-+ },
-+};
-+module_platform_driver(ipq_cmn_pll_clk_driver);
-+
-+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
-+MODULE_LICENSE("GPL");
+++ /dev/null
-From c0f1cbf795095c21b92a46fa1dc47a7b787ce538 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Fri, 3 Jan 2025 15:31:34 +0800
-Subject: [PATCH 1/3] dt-bindings: clock: qcom: Add CMN PLL clock controller
- for IPQ SoC
-
-The CMN PLL controller provides clocks to networking hardware blocks
-and to GCC on Qualcomm IPQ9574 SoC. It receives input clock from the
-on-chip Wi-Fi, and produces output clocks at fixed rates. These output
-rates are predetermined, and are unrelated to the input clock rate.
-The primary purpose of CMN PLL is to supply clocks to the networking
-hardware such as PPE (packet process engine), PCS and the externally
-connected switch or PHY device. The CMN PLL block also outputs fixed
-rate clocks to GCC, such as 24 MHZ as XO clock and 32 KHZ as sleep
-clock supplied to GCC.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-1-c89fb4d4849d@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- .../bindings/clock/qcom,ipq9574-cmn-pll.yaml | 77 +++++++++++++++++++
- include/dt-bindings/clock/qcom,ipq-cmn-pll.h | 22 ++++++
- 2 files changed, 99 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
- create mode 100644 include/dt-bindings/clock/qcom,ipq-cmn-pll.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
-@@ -0,0 +1,77 @@
-+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/clock/qcom,ipq9574-cmn-pll.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Qualcomm CMN PLL Clock Controller on IPQ SoC
-+
-+maintainers:
-+ - Bjorn Andersson <andersson@kernel.org>
-+ - Luo Jie <quic_luoj@quicinc.com>
-+
-+description:
-+ The CMN (or common) PLL clock controller expects a reference
-+ input clock. This reference clock is from the on-board Wi-Fi.
-+ The CMN PLL supplies a number of fixed rate output clocks to
-+ the devices providing networking functions and to GCC. These
-+ networking hardware include PPE (packet process engine), PCS
-+ and the externally connected switch or PHY devices. The CMN
-+ PLL block also outputs fixed rate clocks to GCC. The PLL's
-+ primary function is to enable fixed rate output clocks for
-+ networking hardware functions used with the IPQ SoC.
-+
-+properties:
-+ compatible:
-+ enum:
-+ - qcom,ipq9574-cmn-pll
-+
-+ reg:
-+ maxItems: 1
-+
-+ clocks:
-+ items:
-+ - description: The reference clock. The supported clock rates include
-+ 25000000, 31250000, 40000000, 48000000, 50000000 and 96000000 HZ.
-+ - description: The AHB clock
-+ - description: The SYS clock
-+ description:
-+ The reference clock is the source clock of CMN PLL, which is from the
-+ Wi-Fi. The AHB and SYS clocks must be enabled to access CMN PLL
-+ clock registers.
-+
-+ clock-names:
-+ items:
-+ - const: ref
-+ - const: ahb
-+ - const: sys
-+
-+ "#clock-cells":
-+ const: 1
-+
-+required:
-+ - compatible
-+ - reg
-+ - clocks
-+ - clock-names
-+ - "#clock-cells"
-+
-+additionalProperties: false
-+
-+examples:
-+ - |
-+ #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
-+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
-+
-+ cmn_pll: clock-controller@9b000 {
-+ compatible = "qcom,ipq9574-cmn-pll";
-+ reg = <0x0009b000 0x800>;
-+ clocks = <&cmn_pll_ref_clk>,
-+ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
-+ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
-+ clock-names = "ref", "ahb", "sys";
-+ #clock-cells = <1>;
-+ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
-+ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
-+ };
-+...
---- /dev/null
-+++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
-@@ -0,0 +1,22 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
-+#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
-+
-+/* CMN PLL core clock. */
-+#define CMN_PLL_CLK 0
-+
-+/* The output clocks from CMN PLL of IPQ9574. */
-+#define XO_24MHZ_CLK 1
-+#define SLEEP_32KHZ_CLK 2
-+#define PCS_31P25MHZ_CLK 3
-+#define NSS_1200MHZ_CLK 4
-+#define PPE_353MHZ_CLK 5
-+#define ETH0_50MHZ_CLK 6
-+#define ETH1_50MHZ_CLK 7
-+#define ETH2_50MHZ_CLK 8
-+#define ETH_25MHZ_CLK 9
-+#endif
+++ /dev/null
-From 758aa2d7e3c0acfe9c952a1cbe6416ec6130c2a1 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Fri, 3 Jan 2025 15:31:37 +0800
-Subject: [PATCH 2/3] arm64: dts: qcom: ipq9574: Add CMN PLL node
-
-The CMN PLL clock controller allows selection of an input clock rate
-from a defined set of input clock rates. It in-turn supplies fixed
-rate output clocks to the hardware blocks that provide the ethernet
-functions such as PPE (Packet Process Engine) and connected switch or
-PHY, and to GCC.
-
-The reference clock of CMN PLL is routed from XO to the CMN PLL through
-the internal WiFi block.
-.XO (48 MHZ or 96 MHZ)-->WiFi (multiplier/divider)-->48 MHZ to CMN PLL.
-
-The reference input clock from WiFi to CMN PLL is fully controlled by
-the bootstrap pins which select the XO frequency (48 MHZ or 96 MHZ).
-Based on this frequency, the divider in the internal Wi-Fi block is
-automatically configured by hardware (1 for 48 MHZ, 2 for 96 MHZ), to
-ensure output clock to CMN PLL is 48 MHZ.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
-Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-4-c89fb4d4849d@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 17 +++++++++++-
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 26 ++++++++++++++++++-
- 2 files changed, 41 insertions(+), 2 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -3,7 +3,7 @@
- * IPQ9574 RDP board common device tree source
- *
- * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
- /dts-v1/;
-@@ -120,6 +120,21 @@
- status = "okay";
- };
-
-+/*
-+ * The bootstrap pins for the board select the XO clock frequency
-+ * (48 MHZ or 96 MHZ used for different RDP type board). This setting
-+ * automatically enables the right dividers, to ensure the reference
-+ * clock output from WiFi to the CMN PLL is 48 MHZ.
-+ */
-+&ref_48mhz_clk {
-+ clock-div = <1>;
-+ clock-mult = <1>;
-+};
-+
- &xo_board_clk {
- clock-frequency = <24000000>;
- };
-+
-+&xo_clk {
-+ clock-frequency = <48000000>;
-+};
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -3,10 +3,11 @@
- * IPQ9574 SoC device tree source
- *
- * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
- #include <dt-bindings/clock/qcom,apss-ipq.h>
-+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
- #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
- #include <dt-bindings/interconnect/qcom,ipq9574.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
-@@ -19,6 +20,12 @@
- #size-cells = <2>;
-
- clocks {
-+ ref_48mhz_clk: ref-48mhz-clk {
-+ compatible = "fixed-factor-clock";
-+ clocks = <&xo_clk>;
-+ #clock-cells = <0>;
-+ };
-+
- sleep_clk: sleep-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
-@@ -28,6 +35,11 @@
- compatible = "fixed-clock";
- #clock-cells = <0>;
- };
-+
-+ xo_clk: xo-clk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ };
- };
-
- cpus {
-@@ -243,6 +255,18 @@
- status = "disabled";
- };
-
-+ cmn_pll: clock-controller@9b000 {
-+ compatible = "qcom,ipq9574-cmn-pll";
-+ reg = <0x0009b000 0x800>;
-+ clocks = <&ref_48mhz_clk>,
-+ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
-+ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
-+ clock-names = "ref", "ahb", "sys";
-+ #clock-cells = <1>;
-+ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
-+ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
-+ };
-+
- qfprom: efuse@a4000 {
- compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
- reg = <0x000a4000 0x5a1>;
+++ /dev/null
-From 050b312654523aac9495eae3cf7bfa868fd981ce Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Fri, 3 Jan 2025 15:31:38 +0800
-Subject: [PATCH 3/3] arm64: dts: qcom: ipq9574: Update xo_board_clk to use
- fixed factor clock
-
-xo_board_clk is fixed to 24 MHZ, which is routed from WiFi output clock
-48 MHZ (also being the reference clock of CMN PLL) divided 2 by analog
-block routing channel.
-
-Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
-Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-5-c89fb4d4849d@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi | 7 ++++++-
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 3 ++-
- 2 files changed, 8 insertions(+), 2 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -131,8 +131,13 @@
- clock-mult = <1>;
- };
-
-+/*
-+ * The frequency of xo_board_clk is fixed to 24 MHZ, which is routed
-+ * from WiFi output clock 48 MHZ divided by 2.
-+ */
- &xo_board_clk {
-- clock-frequency = <24000000>;
-+ clock-div = <2>;
-+ clock-mult = <1>;
- };
-
- &xo_clk {
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -32,7 +32,8 @@
- };
-
- xo_board_clk: xo-board-clk {
-- compatible = "fixed-clock";
-+ compatible = "fixed-factor-clock";
-+ clocks = <&ref_48mhz_clk>;
- #clock-cells = <0>;
- };
-
+++ /dev/null
-From 652935ba05860eadaa19ac9efe7aea61fb7a3aef Mon Sep 17 00:00:00 2001
-From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Date: Wed, 17 Apr 2024 12:32:53 +0530
-Subject: [PATCH] PCI: qcom: Use devm_clk_bulk_get_all() API
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-There is no need for the device drivers to validate the clocks defined in
-Devicetree. The validation should be performed by the DT schema and the
-drivers should just get all the clocks from DT. Right now the driver
-hardcodes the clock info and validates them against DT which is redundant.
-
-So use devm_clk_bulk_get_all() that just gets all the clocks defined in DT
-and get rid of all static clocks info from the driver. This simplifies the
-driver.
-
-Link: https://lore.kernel.org/linux-pci/20240417-pci-qcom-clk-bulk-v1-1-52ca19b3d6b2@linaro.org
-Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
----
- drivers/pci/controller/dwc/pcie-qcom.c | 177 ++++++++-----------------
- 1 file changed, 58 insertions(+), 119 deletions(-)
-
---- a/drivers/pci/controller/dwc/pcie-qcom.c
-+++ b/drivers/pci/controller/dwc/pcie-qcom.c
-@@ -151,58 +151,56 @@
-
- #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
-
--#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
- struct qcom_pcie_resources_1_0_0 {
-- struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
-+ int num_clks;
- struct reset_control *core;
- struct regulator *vdda;
- };
-
--#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
- #define QCOM_PCIE_2_1_0_MAX_RESETS 6
- #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
- struct qcom_pcie_resources_2_1_0 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
-+ int num_clks;
- struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
- int num_resets;
- struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
- };
-
--#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
- #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
- struct qcom_pcie_resources_2_3_2 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
-+ int num_clks;
- struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
- };
-
--#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
- #define QCOM_PCIE_2_3_3_MAX_RESETS 7
- struct qcom_pcie_resources_2_3_3 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
-+ int num_clks;
- struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
- };
-
--#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
- #define QCOM_PCIE_2_4_0_MAX_RESETS 12
- struct qcom_pcie_resources_2_4_0 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
- int num_clks;
- struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
- int num_resets;
- };
-
--#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
- #define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
- struct qcom_pcie_resources_2_7_0 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
- int num_clks;
- struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
- struct reset_control *rst;
- };
-
--#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
- struct qcom_pcie_resources_2_9_0 {
-- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
-+ struct clk_bulk_data *clks;
-+ int num_clks;
- struct reset_control *rst;
- };
-
-@@ -313,21 +311,11 @@ static int qcom_pcie_get_resources_2_1_0
- if (ret)
- return ret;
-
-- res->clks[0].id = "iface";
-- res->clks[1].id = "core";
-- res->clks[2].id = "phy";
-- res->clks[3].id = "aux";
-- res->clks[4].id = "ref";
--
-- /* iface, core, phy are required */
-- ret = devm_clk_bulk_get(dev, 3, res->clks);
-- if (ret < 0)
-- return ret;
--
-- /* aux, ref are optional */
-- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- res->resets[0].id = "pci";
- res->resets[1].id = "axi";
-@@ -349,7 +337,7 @@ static void qcom_pcie_deinit_2_1_0(struc
- {
- struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
-
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- reset_control_bulk_assert(res->num_resets, res->resets);
-
- writel(1, pcie->parf + PARF_PHY_CTRL);
-@@ -401,7 +389,7 @@ static int qcom_pcie_post_init_2_1_0(str
- val &= ~PHY_TEST_PWR_DOWN;
- writel(val, pcie->parf + PARF_PHY_CTRL);
-
-- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
-+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
- if (ret)
- return ret;
-
-@@ -452,20 +440,16 @@ static int qcom_pcie_get_resources_1_0_0
- struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
-- int ret;
-
- res->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(res->vdda))
- return PTR_ERR(res->vdda);
-
-- res->clks[0].id = "iface";
-- res->clks[1].id = "aux";
-- res->clks[2].id = "master_bus";
-- res->clks[3].id = "slave_bus";
--
-- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- res->core = devm_reset_control_get_exclusive(dev, "core");
- return PTR_ERR_OR_ZERO(res->core);
-@@ -476,7 +460,7 @@ static void qcom_pcie_deinit_1_0_0(struc
- struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
-
- reset_control_assert(res->core);
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- regulator_disable(res->vdda);
- }
-
-@@ -493,7 +477,7 @@ static int qcom_pcie_init_1_0_0(struct q
- return ret;
- }
-
-- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
-+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
- if (ret) {
- dev_err(dev, "cannot prepare/enable clocks\n");
- goto err_assert_reset;
-@@ -508,7 +492,7 @@ static int qcom_pcie_init_1_0_0(struct q
- return 0;
-
- err_disable_clks:
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- err_assert_reset:
- reset_control_assert(res->core);
-
-@@ -556,14 +540,11 @@ static int qcom_pcie_get_resources_2_3_2
- if (ret)
- return ret;
-
-- res->clks[0].id = "aux";
-- res->clks[1].id = "cfg";
-- res->clks[2].id = "bus_master";
-- res->clks[3].id = "bus_slave";
--
-- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- return 0;
- }
-@@ -572,7 +553,7 @@ static void qcom_pcie_deinit_2_3_2(struc
- {
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
-
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
- }
-
-@@ -589,7 +570,7 @@ static int qcom_pcie_init_2_3_2(struct q
- return ret;
- }
-
-- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
-+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
- if (ret) {
- dev_err(dev, "cannot prepare/enable clocks\n");
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-@@ -637,17 +618,11 @@ static int qcom_pcie_get_resources_2_4_0
- bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
- int ret;
-
-- res->clks[0].id = "aux";
-- res->clks[1].id = "master_bus";
-- res->clks[2].id = "slave_bus";
-- res->clks[3].id = "iface";
--
-- /* qcom,pcie-ipq4019 is defined without "iface" */
-- res->num_clks = is_ipq ? 3 : 4;
--
-- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- res->resets[0].id = "axi_m";
- res->resets[1].id = "axi_s";
-@@ -718,15 +693,11 @@ static int qcom_pcie_get_resources_2_3_3
- struct device *dev = pci->dev;
- int ret;
-
-- res->clks[0].id = "iface";
-- res->clks[1].id = "axi_m";
-- res->clks[2].id = "axi_s";
-- res->clks[3].id = "ahb";
-- res->clks[4].id = "aux";
--
-- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- res->rst[0].id = "axi_m";
- res->rst[1].id = "axi_s";
-@@ -747,7 +718,7 @@ static void qcom_pcie_deinit_2_3_3(struc
- {
- struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
-
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- }
-
- static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
-@@ -777,7 +748,7 @@ static int qcom_pcie_init_2_3_3(struct q
- */
- usleep_range(2000, 2500);
-
-- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
-+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
- if (ret) {
- dev_err(dev, "cannot prepare/enable clocks\n");
- goto err_assert_resets;
-@@ -838,8 +809,6 @@ static int qcom_pcie_get_resources_2_7_0
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
-- unsigned int num_clks, num_opt_clks;
-- unsigned int idx;
- int ret;
-
- res->rst = devm_reset_control_array_get_exclusive(dev);
-@@ -853,36 +822,11 @@ static int qcom_pcie_get_resources_2_7_0
- if (ret)
- return ret;
-
-- idx = 0;
-- res->clks[idx++].id = "aux";
-- res->clks[idx++].id = "cfg";
-- res->clks[idx++].id = "bus_master";
-- res->clks[idx++].id = "bus_slave";
-- res->clks[idx++].id = "slave_q2a";
--
-- num_clks = idx;
--
-- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
-- if (ret < 0)
-- return ret;
--
-- res->clks[idx++].id = "tbu";
-- res->clks[idx++].id = "ddrss_sf_tbu";
-- res->clks[idx++].id = "aggre0";
-- res->clks[idx++].id = "aggre1";
-- res->clks[idx++].id = "noc_aggr";
-- res->clks[idx++].id = "noc_aggr_4";
-- res->clks[idx++].id = "noc_aggr_south_sf";
-- res->clks[idx++].id = "cnoc_qx";
-- res->clks[idx++].id = "sleep";
-- res->clks[idx++].id = "cnoc_sf_axi";
--
-- num_opt_clks = idx - num_clks;
-- res->num_clks = idx;
--
-- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- return 0;
- }
-@@ -1073,17 +1017,12 @@ static int qcom_pcie_get_resources_2_9_0
- struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
-- int ret;
--
-- res->clks[0].id = "iface";
-- res->clks[1].id = "axi_m";
-- res->clks[2].id = "axi_s";
-- res->clks[3].id = "axi_bridge";
-- res->clks[4].id = "rchng";
-
-- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-- if (ret < 0)
-- return ret;
-+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
-+ if (res->num_clks < 0) {
-+ dev_err(dev, "Failed to get clocks\n");
-+ return res->num_clks;
-+ }
-
- res->rst = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(res->rst))
-@@ -1096,7 +1035,7 @@ static void qcom_pcie_deinit_2_9_0(struc
- {
- struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
-
-- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
-+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
- }
-
- static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
-@@ -1125,7 +1064,7 @@ static int qcom_pcie_init_2_9_0(struct q
-
- usleep_range(2000, 2500);
-
-- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
-+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
- }
-
- static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+++ /dev/null
-From 10ba0854c5e6165b58e17bda5fb671e729fecf9e Mon Sep 17 00:00:00 2001
-From: Prudhvi Yarlagadda <quic_pyarlaga@quicinc.com>
-Date: Wed, 14 Aug 2024 15:03:38 -0700
-Subject: [PATCH] PCI: qcom: Disable mirroring of DBI and iATU register space
- in BAR region
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-PARF hardware block which is a wrapper on top of DWC PCIe controller
-mirrors the DBI and ATU register space. It uses PARF_SLV_ADDR_SPACE_SIZE
-register to get the size of the memory block to be mirrored and uses
-PARF_DBI_BASE_ADDR, PARF_ATU_BASE_ADDR registers to determine the base
-address of DBI and ATU space inside the memory block that is being
-mirrored.
-
-When a memory region which is located above the SLV_ADDR_SPACE_SIZE
-boundary is used for BAR region then there could be an overlap of DBI and
-ATU address space that is getting mirrored and the BAR region. This
-results in DBI and ATU address space contents getting updated when a PCIe
-function driver tries updating the BAR/MMIO memory region. Reference
-memory map of the PCIe memory region with DBI and ATU address space
-overlapping BAR region is as below.
-
- |---------------|
- | |
- | |
- ------- --------|---------------|
- | | |---------------|
- | | | DBI |
- | | |---------------|---->DBI_BASE_ADDR
- | | | |
- | | | |
- | PCIe | |---->2*SLV_ADDR_SPACE_SIZE
- | BAR/MMIO|---------------|
- | Region | ATU |
- | | |---------------|---->ATU_BASE_ADDR
- | | | |
- PCIe | |---------------|
- Memory | | DBI |
- Region | |---------------|---->DBI_BASE_ADDR
- | | | |
- | --------| |
- | | |---->SLV_ADDR_SPACE_SIZE
- | |---------------|
- | | ATU |
- | |---------------|---->ATU_BASE_ADDR
- | | |
- | |---------------|
- | | DBI |
- | |---------------|---->DBI_BASE_ADDR
- | | |
- | | |
- ----------------|---------------|
- | |
- | |
- | |
- |---------------|
-
-Currently memory region beyond the SLV_ADDR_SPACE_SIZE boundary is not
-used for BAR region which is why the above mentioned issue is not
-encountered. This issue is discovered as part of internal testing when we
-tried moving the BAR region beyond the SLV_ADDR_SPACE_SIZE boundary. Hence
-we are trying to fix this.
-
-As PARF hardware block mirrors DBI and ATU register space after every
-PARF_SLV_ADDR_SPACE_SIZE (default 0x1000000) boundary multiple, program
-maximum possible size to this register by writing 0x80000000 to it(it
-considers only powers of 2 as values) to avoid mirroring DBI and ATU to
-BAR/MMIO region. Write the physical base address of DBI and ATU register
-blocks to PARF_DBI_BASE_ADDR (default 0x0) and PARF_ATU_BASE_ADDR (default
-0x1000) respectively to make sure DBI and ATU blocks are at expected
-memory locations.
-
-The register offsets PARF_DBI_BASE_ADDR_V2, PARF_SLV_ADDR_SPACE_SIZE_V2
-and PARF_ATU_BASE_ADDR are applicable for platforms that use Qcom IP
-rev 1.9.0, 2.7.0 and 2.9.0. PARF_DBI_BASE_ADDR_V2 and
-PARF_SLV_ADDR_SPACE_SIZE_V2 are applicable for Qcom IP rev 2.3.3.
-PARF_DBI_BASE_ADDR and PARF_SLV_ADDR_SPACE_SIZE are applicable for Qcom
-IP rev 1.0.0, 2.3.2 and 2.4.0. Update init()/post_init() functions of the
-respective Qcom IP versions to program applicable PARF_DBI_BASE_ADDR,
-PARF_SLV_ADDR_SPACE_SIZE and PARF_ATU_BASE_ADDR register offsets. Update
-the SLV_ADDR_SPACE_SZ macro to 0x80000000 to set highest bit in
-PARF_SLV_ADDR_SPACE_SIZE register.
-
-Cache DBI and iATU physical addresses in 'struct dw_pcie' so that
-pcie_qcom.c driver can program these addresses in the PARF_DBI_BASE_ADDR
-and PARF_ATU_BASE_ADDR registers.
-
-Suggested-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Link: https://lore.kernel.org/linux-pci/20240814220338.1969668-1-quic_pyarlaga@quicinc.com
-Signed-off-by: Prudhvi Yarlagadda <quic_pyarlaga@quicinc.com>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Reviewed-by: Mayank Rana <quic_mrana@quicinc.com>
----
- drivers/pci/controller/dwc/pcie-designware.c | 2 +
- drivers/pci/controller/dwc/pcie-designware.h | 2 +
- drivers/pci/controller/dwc/pcie-qcom.c | 72 ++++++++++++++++----
- 3 files changed, 61 insertions(+), 15 deletions(-)
-
---- a/drivers/pci/controller/dwc/pcie-designware.c
-+++ b/drivers/pci/controller/dwc/pcie-designware.c
-@@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie
- pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-+ pci->dbi_phys_addr = res->start;
- }
-
- /* DBI2 is mainly useful for the endpoint controller */
-@@ -134,6 +135,7 @@ int dw_pcie_get_resources(struct dw_pcie
- pci->atu_base = devm_ioremap_resource(pci->dev, res);
- if (IS_ERR(pci->atu_base))
- return PTR_ERR(pci->atu_base);
-+ pci->atu_phys_addr = res->start;
- } else {
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
---- a/drivers/pci/controller/dwc/pcie-designware.h
-+++ b/drivers/pci/controller/dwc/pcie-designware.h
-@@ -384,8 +384,10 @@ struct dw_pcie_ops {
- struct dw_pcie {
- struct device *dev;
- void __iomem *dbi_base;
-+ resource_size_t dbi_phys_addr;
- void __iomem *dbi_base2;
- void __iomem *atu_base;
-+ resource_size_t atu_phys_addr;
- size_t atu_size;
- u32 num_ib_windows;
- u32 num_ob_windows;
---- a/drivers/pci/controller/dwc/pcie-qcom.c
-+++ b/drivers/pci/controller/dwc/pcie-qcom.c
-@@ -43,6 +43,7 @@
- #define PARF_PHY_REFCLK 0x4c
- #define PARF_CONFIG_BITS 0x50
- #define PARF_DBI_BASE_ADDR 0x168
-+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
- #define PARF_MHI_CLOCK_RESET_CTRL 0x174
- #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
- #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
-@@ -50,7 +51,12 @@
- #define PARF_LTSSM 0x1b0
- #define PARF_SID_OFFSET 0x234
- #define PARF_BDF_TRANSLATE_CFG 0x24c
--#define PARF_SLV_ADDR_SPACE_SIZE 0x358
-+#define PARF_DBI_BASE_ADDR_V2 0x350
-+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
-+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
-+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-+#define PARF_ATU_BASE_ADDR 0x634
-+#define PARF_ATU_BASE_ADDR_HI 0x638
- #define PARF_DEVICE_TYPE 0x1000
- #define PARF_BDF_TO_SID_TABLE_N 0x2000
- #define PARF_BDF_TO_SID_CFG 0x2c00
-@@ -105,7 +111,7 @@
- #define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
-
- /* PARF_SLV_ADDR_SPACE_SIZE register value */
--#define SLV_ADDR_SPACE_SZ 0x10000000
-+#define SLV_ADDR_SPACE_SZ 0x80000000
-
- /* PARF_MHI_CLOCK_RESET_CTRL register fields */
- #define AHB_CLK_EN BIT(0)
-@@ -285,6 +291,50 @@ static void qcom_pcie_clear_hpc(struct d
- dw_pcie_dbi_ro_wr_dis(pci);
- }
-
-+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
-+{
-+ struct dw_pcie *pci = pcie->pci;
-+
-+ if (pci->dbi_phys_addr) {
-+ /*
-+ * PARF_DBI_BASE_ADDR register is in CPU domain and require to
-+ * be programmed with CPU physical address.
-+ */
-+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
-+ PARF_DBI_BASE_ADDR);
-+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
-+ PARF_SLV_ADDR_SPACE_SIZE);
-+ }
-+}
-+
-+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
-+{
-+ struct dw_pcie *pci = pcie->pci;
-+
-+ if (pci->dbi_phys_addr) {
-+ /*
-+ * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
-+ * in CPU domain and require to be programmed with CPU
-+ * physical addresses.
-+ */
-+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
-+ PARF_DBI_BASE_ADDR_V2);
-+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
-+ PARF_DBI_BASE_ADDR_V2_HI);
-+
-+ if (pci->atu_phys_addr) {
-+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
-+ PARF_ATU_BASE_ADDR);
-+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
-+ PARF_ATU_BASE_ADDR_HI);
-+ }
-+
-+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
-+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
-+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
-+ }
-+}
-+
- static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
- {
- u32 val;
-@@ -501,8 +551,7 @@ err_assert_reset:
-
- static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
- {
-- /* change DBI base address */
-- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
-+ qcom_pcie_configure_dbi_base(pcie);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
-@@ -589,8 +638,7 @@ static int qcom_pcie_post_init_2_3_2(str
- val &= ~PHY_TEST_PWR_DOWN;
- writel(val, pcie->parf + PARF_PHY_CTRL);
-
-- /* change DBI base address */
-- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
-+ qcom_pcie_configure_dbi_base(pcie);
-
- /* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PARF_SYS_CTRL);
-@@ -772,13 +820,11 @@ static int qcom_pcie_post_init_2_3_3(str
- u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
- u32 val;
-
-- writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
--
- val = readl(pcie->parf + PARF_PHY_CTRL);
- val &= ~PHY_TEST_PWR_DOWN;
- writel(val, pcie->parf + PARF_PHY_CTRL);
-
-- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
-+ qcom_pcie_configure_dbi_atu_base(pcie);
-
- writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
- | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
-@@ -874,8 +920,7 @@ static int qcom_pcie_init_2_7_0(struct q
- val &= ~PHY_TEST_PWR_DOWN;
- writel(val, pcie->parf + PARF_PHY_CTRL);
-
-- /* change DBI base address */
-- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
-+ qcom_pcie_configure_dbi_atu_base(pcie);
-
- /* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PARF_SYS_CTRL);
-@@ -1074,14 +1119,11 @@ static int qcom_pcie_post_init_2_9_0(str
- u32 val;
- int i;
-
-- writel(SLV_ADDR_SPACE_SZ,
-- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
--
- val = readl(pcie->parf + PARF_PHY_CTRL);
- val &= ~PHY_TEST_PWR_DOWN;
- writel(val, pcie->parf + PARF_PHY_CTRL);
-
-- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
-+ qcom_pcie_configure_dbi_atu_base(pcie);
-
- writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
- writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+++ /dev/null
-From f1aaa788b997ba8a7810da0696e89fd3f79ecce3 Mon Sep 17 00:00:00 2001
-From: devi priya <quic_devipriy@quicinc.com>
-Date: Thu, 16 May 2024 08:54:34 +0530
-Subject: [PATCH 1/3] phy: qcom-qmp: Add missing offsets for Qserdes PLL
- registers.
-
-Add missing register offsets for Qserdes PLL.
-
-Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
-Signed-off-by: devi priya <quic_devipriy@quicinc.com>
-Link: https://lore.kernel.org/r/20240516032436.2681828-3-quic_devipriy@quicinc.com
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
-+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
-@@ -8,6 +8,9 @@
-
- /* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
- #define QSERDES_PLL_BG_TIMER 0x00c
-+#define QSERDES_PLL_SSC_EN_CENTER 0x010
-+#define QSERDES_PLL_SSC_ADJ_PER1 0x014
-+#define QSERDES_PLL_SSC_ADJ_PER2 0x018
- #define QSERDES_PLL_SSC_PER1 0x01c
- #define QSERDES_PLL_SSC_PER2 0x020
- #define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
+++ /dev/null
-From 71ae2acf1d7542ecd21c6933cae8fe65d550074b Mon Sep 17 00:00:00 2001
-From: devi priya <quic_devipriy@quicinc.com>
-Date: Thu, 16 May 2024 08:54:35 +0530
-Subject: [PATCH 2/3] phy: qcom-qmp: Add missing register definitions for PCS
- V5
-
-Add missing register offsets for PCS V5 registers.
-
-Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
-Signed-off-by: devi priya <quic_devipriy@quicinc.com>
-Link: https://lore.kernel.org/r/20240516032436.2681828-4-quic_devipriy@quicinc.com
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
---- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
-+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
-@@ -11,8 +11,22 @@
- #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
- #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4 0x14
- #define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
-+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x44
-+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x48
-+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x4c
-+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x50
- #define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1 0x5c
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2 0x60
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4 0x68
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x7c
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x84
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x88
-+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x8c
- #define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
-+#define QPHY_V5_PCS_PCIE_EQ_CONFIG1 0xa4
- #define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
-+#define QPHY_V5_PCS_PCIE_PRESET_P10_PRE 0xc0
-+#define QPHY_V5_PCS_PCIE_PRESET_P10_POST 0xe4
-
- #endif
+++ /dev/null
-From 2f2f5c13cc5ea87f1dd2debfd06fe5f624e5c0fd Mon Sep 17 00:00:00 2001
-From: devi priya <quic_devipriy@quicinc.com>
-Date: Thu, 16 May 2024 08:54:36 +0530
-Subject: [PATCH 3/3] phy: qcom-qmp-pcie: Add support for IPQ9574 g3x1 and g3x2
- PCIEs
-
-Add support for a single-lane and two-lane PCIe PHYs
-found on Qualcomm IPQ9574 platform.
-
-Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
-Co-developed-by: Anusha Rao <quic_anusha@quicinc.com>
-Signed-off-by: Anusha Rao <quic_anusha@quicinc.com>
-Signed-off-by: devi priya <quic_devipriy@quicinc.com>
-Link: https://lore.kernel.org/r/20240516032436.2681828-5-quic_devipriy@quicinc.com
-Signed-off-by: Vinod Koul <vkoul@kernel.org>
----
- drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 309 +++++++++++++++++++++++
- 1 file changed, 309 insertions(+)
-
---- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
-+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
-@@ -514,6 +514,243 @@ static const struct qmp_phy_init_tbl ipq
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
- };
-
-+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_serdes_tbl[] = {
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_serdes_tbl[] = {
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
-+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_pcie_rx_tbl[] = {
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x73),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x80),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x02),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
-+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_tbl[] = {
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_misc_tbl[] = {
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x14),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x10),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0b),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x06),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_tbl[] = {
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
-+};
-+
-+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = {
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x14),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x10),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0b),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_PRE, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_POST, 0x58),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4, 0x19),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x49),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x2a),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x02),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
-+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
-+};
-+
- static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
-@@ -2354,6 +2591,16 @@ static const struct qmp_pcie_offsets qmp
- .rx2 = 0x1800,
- };
-
-+static const struct qmp_pcie_offsets qmp_pcie_offsets_ipq9574 = {
-+ .serdes = 0,
-+ .pcs = 0x1000,
-+ .pcs_misc = 0x1400,
-+ .tx = 0x0200,
-+ .rx = 0x0400,
-+ .tx2 = 0x0600,
-+ .rx2 = 0x0800,
-+};
-+
- static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
- .serdes = 0x1000,
- .pcs = 0x1200,
-@@ -2466,6 +2713,62 @@ static const struct qmp_phy_cfg ipq6018_
- .phy_status = PHYSTATUS,
- };
-
-+static const struct qmp_phy_cfg ipq9574_gen3x1_pciephy_cfg = {
-+ .lanes = 1,
-+
-+ .offsets = &qmp_pcie_offsets_v4x1,
-+
-+ .tbls = {
-+ .serdes = ipq9574_gen3x1_pcie_serdes_tbl,
-+ .serdes_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_serdes_tbl),
-+ .tx = ipq8074_pcie_gen3_tx_tbl,
-+ .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
-+ .rx = ipq9574_pcie_rx_tbl,
-+ .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl),
-+ .pcs = ipq9574_gen3x1_pcie_pcs_tbl,
-+ .pcs_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_tbl),
-+ .pcs_misc = ipq9574_gen3x1_pcie_pcs_misc_tbl,
-+ .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_misc_tbl),
-+ },
-+ .reset_list = ipq8074_pciephy_reset_l,
-+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
-+ .vreg_list = NULL,
-+ .num_vregs = 0,
-+ .regs = pciephy_v4_regs_layout,
-+
-+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
-+ .phy_status = PHYSTATUS,
-+ .pipe_clock_rate = 250000000,
-+};
-+
-+static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = {
-+ .lanes = 2,
-+
-+ .offsets = &qmp_pcie_offsets_ipq9574,
-+
-+ .tbls = {
-+ .serdes = ipq9574_gen3x2_pcie_serdes_tbl,
-+ .serdes_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_serdes_tbl),
-+ .tx = ipq8074_pcie_gen3_tx_tbl,
-+ .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
-+ .rx = ipq9574_pcie_rx_tbl,
-+ .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl),
-+ .pcs = ipq9574_gen3x2_pcie_pcs_tbl,
-+ .pcs_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_tbl),
-+ .pcs_misc = ipq9574_gen3x2_pcie_pcs_misc_tbl,
-+ .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_misc_tbl),
-+ },
-+ .reset_list = ipq8074_pciephy_reset_l,
-+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
-+ .vreg_list = NULL,
-+ .num_vregs = 0,
-+ .regs = pciephy_v5_regs_layout,
-+
-+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
-+ .phy_status = PHYSTATUS,
-+ .pipe_clock_rate = 250000000,
-+};
-+
- static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
- .lanes = 1,
-
-@@ -3718,6 +4021,12 @@ static const struct of_device_id qmp_pci
- .compatible = "qcom,ipq8074-qmp-pcie-phy",
- .data = &ipq8074_pciephy_cfg,
- }, {
-+ .compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy",
-+ .data = &ipq9574_gen3x1_pciephy_cfg,
-+ }, {
-+ .compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy",
-+ .data = &ipq9574_gen3x2_pciephy_cfg,
-+ }, {
- .compatible = "qcom,msm8998-qmp-pcie-phy",
- .data = &msm8998_pciephy_cfg,
- }, {
+++ /dev/null
-From d80c7fbfa908e3d893a1ea7fe178dfa82ed66bf1 Mon Sep 17 00:00:00 2001
-From: devi priya <quic_devipriy@quicinc.com>
-Date: Thu, 1 Aug 2024 11:18:01 +0530
-Subject: [PATCH 1/2] arm64: dts: qcom: ipq9574: Add PCIe PHYs and controller
- nodes
-
-Add PCIe0, PCIe1, PCIe2, PCIe3 (and corresponding PHY) devices
-found on IPQ9574 platform. The PCIe0 & PCIe1 are 1-lane Gen3
-host whereas PCIe2 & PCIe3 are 2-lane Gen3 host.
-
-Signed-off-by: devi priya <quic_devipriy@quicinc.com>
-Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
-Link: https://lore.kernel.org/r/20240801054803.3015572-3-quic_srichara@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 420 +++++++++++++++++++++++++-
- 1 file changed, 416 insertions(+), 4 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -239,6 +239,52 @@
- reg = <0x00060000 0x6000>;
- };
-
-+ pcie0_phy: phy@84000 {
-+ compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
-+ reg = <0x00084000 0x1000>;
-+
-+ clocks = <&gcc GCC_PCIE0_AUX_CLK>,
-+ <&gcc GCC_PCIE0_AHB_CLK>,
-+ <&gcc GCC_PCIE0_PIPE_CLK>;
-+ clock-names = "aux", "cfg_ahb", "pipe";
-+
-+ assigned-clocks = <&gcc GCC_PCIE0_AUX_CLK>;
-+ assigned-clock-rates = <20000000>;
-+
-+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
-+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
-+ reset-names = "phy", "common";
-+
-+ #clock-cells = <0>;
-+ clock-output-names = "gcc_pcie0_pipe_clk_src";
-+
-+ #phy-cells = <0>;
-+ status = "disabled";
-+ };
-+
-+ pcie2_phy: phy@8c000 {
-+ compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
-+ reg = <0x0008c000 0x2000>;
-+
-+ clocks = <&gcc GCC_PCIE2_AUX_CLK>,
-+ <&gcc GCC_PCIE2_AHB_CLK>,
-+ <&gcc GCC_PCIE2_PIPE_CLK>;
-+ clock-names = "aux", "cfg_ahb", "pipe";
-+
-+ assigned-clocks = <&gcc GCC_PCIE2_AUX_CLK>;
-+ assigned-clock-rates = <20000000>;
-+
-+ resets = <&gcc GCC_PCIE2_PHY_BCR>,
-+ <&gcc GCC_PCIE2PHY_PHY_BCR>;
-+ reset-names = "phy", "common";
-+
-+ #clock-cells = <0>;
-+ clock-output-names = "gcc_pcie2_pipe_clk_src";
-+
-+ #phy-cells = <0>;
-+ status = "disabled";
-+ };
-+
- rng: rng@e3000 {
- compatible = "qcom,prng-ee";
- reg = <0x000e3000 0x1000>;
-@@ -268,6 +314,52 @@
- assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
- };
-
-+ pcie3_phy: phy@f4000 {
-+ compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
-+ reg = <0x000f4000 0x2000>;
-+
-+ clocks = <&gcc GCC_PCIE3_AUX_CLK>,
-+ <&gcc GCC_PCIE3_AHB_CLK>,
-+ <&gcc GCC_PCIE3_PIPE_CLK>;
-+ clock-names = "aux", "cfg_ahb", "pipe";
-+
-+ assigned-clocks = <&gcc GCC_PCIE3_AUX_CLK>;
-+ assigned-clock-rates = <20000000>;
-+
-+ resets = <&gcc GCC_PCIE3_PHY_BCR>,
-+ <&gcc GCC_PCIE3PHY_PHY_BCR>;
-+ reset-names = "phy", "common";
-+
-+ #clock-cells = <0>;
-+ clock-output-names = "gcc_pcie3_pipe_clk_src";
-+
-+ #phy-cells = <0>;
-+ status = "disabled";
-+ };
-+
-+ pcie1_phy: phy@fc000 {
-+ compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
-+ reg = <0x000fc000 0x1000>;
-+
-+ clocks = <&gcc GCC_PCIE1_AUX_CLK>,
-+ <&gcc GCC_PCIE1_AHB_CLK>,
-+ <&gcc GCC_PCIE1_PIPE_CLK>;
-+ clock-names = "aux", "cfg_ahb", "pipe";
-+
-+ assigned-clocks = <&gcc GCC_PCIE1_AUX_CLK>;
-+ assigned-clock-rates = <20000000>;
-+
-+ resets = <&gcc GCC_PCIE1_PHY_BCR>,
-+ <&gcc GCC_PCIE1PHY_PHY_BCR>;
-+ reset-names = "phy", "common";
-+
-+ #clock-cells = <0>;
-+ clock-output-names = "gcc_pcie1_pipe_clk_src";
-+
-+ #phy-cells = <0>;
-+ status = "disabled";
-+ };
-+
- qfprom: efuse@a4000 {
- compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
- reg = <0x000a4000 0x5a1>;
-@@ -334,10 +426,10 @@
- clocks = <&xo_board_clk>,
- <&sleep_clk>,
- <0>,
-- <0>,
-- <0>,
-- <0>,
-- <0>,
-+ <&pcie0_phy>,
-+ <&pcie1_phy>,
-+ <&pcie2_phy>,
-+ <&pcie3_phy>,
- <0>;
- #clock-cells = <1>;
- #reset-cells = <1>;
-@@ -777,6 +869,326 @@
- status = "disabled";
- };
- };
-+
-+ pcie1: pcie@10000000 {
-+ compatible = "qcom,pcie-ipq9574";
-+ reg = <0x10000000 0xf1d>,
-+ <0x10000f20 0xa8>,
-+ <0x10001000 0x1000>,
-+ <0x000f8000 0x4000>,
-+ <0x10100000 0x1000>;
-+ reg-names = "dbi", "elbi", "atu", "parf", "config";
-+ device_type = "pci";
-+ linux,pci-domain = <1>;
-+ bus-range = <0x00 0xff>;
-+ num-lanes = <1>;
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+
-+ ranges = <0x01000000 0x0 0x00000000 0x10200000 0x0 0x100000>,
-+ <0x02000000 0x0 0x10300000 0x10300000 0x0 0x7d00000>;
-+
-+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "msi0",
-+ "msi1",
-+ "msi2",
-+ "msi3",
-+ "msi4",
-+ "msi5",
-+ "msi6",
-+ "msi7";
-+
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 0x7>;
-+ interrupt-map = <0 0 0 1 &intc 0 0 35 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 2 &intc 0 0 49 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 3 &intc 0 0 84 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 4 &intc 0 0 85 IRQ_TYPE_LEVEL_HIGH>;
-+
-+ clocks = <&gcc GCC_PCIE1_AXI_M_CLK>,
-+ <&gcc GCC_PCIE1_AXI_S_CLK>,
-+ <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>,
-+ <&gcc GCC_PCIE1_RCHNG_CLK>,
-+ <&gcc GCC_PCIE1_AHB_CLK>,
-+ <&gcc GCC_PCIE1_AUX_CLK>;
-+ clock-names = "axi_m",
-+ "axi_s",
-+ "axi_bridge",
-+ "rchng",
-+ "ahb",
-+ "aux";
-+
-+ resets = <&gcc GCC_PCIE1_PIPE_ARES>,
-+ <&gcc GCC_PCIE1_CORE_STICKY_ARES>,
-+ <&gcc GCC_PCIE1_AXI_S_STICKY_ARES>,
-+ <&gcc GCC_PCIE1_AXI_S_ARES>,
-+ <&gcc GCC_PCIE1_AXI_M_STICKY_ARES>,
-+ <&gcc GCC_PCIE1_AXI_M_ARES>,
-+ <&gcc GCC_PCIE1_AUX_ARES>,
-+ <&gcc GCC_PCIE1_AHB_ARES>;
-+ reset-names = "pipe",
-+ "sticky",
-+ "axi_s_sticky",
-+ "axi_s",
-+ "axi_m_sticky",
-+ "axi_m",
-+ "aux",
-+ "ahb";
-+
-+ phys = <&pcie1_phy>;
-+ phy-names = "pciephy";
-+ interconnects = <&gcc MASTER_ANOC_PCIE1 &gcc SLAVE_ANOC_PCIE1>,
-+ <&gcc MASTER_SNOC_PCIE1 &gcc SLAVE_SNOC_PCIE1>;
-+ interconnect-names = "pcie-mem", "cpu-pcie";
-+ status = "disabled";
-+ };
-+
-+ pcie3: pcie@18000000 {
-+ compatible = "qcom,pcie-ipq9574";
-+ reg = <0x18000000 0xf1d>,
-+ <0x18000f20 0xa8>,
-+ <0x18001000 0x1000>,
-+ <0x000f0000 0x4000>,
-+ <0x18100000 0x1000>;
-+ reg-names = "dbi", "elbi", "atu", "parf", "config";
-+ device_type = "pci";
-+ linux,pci-domain = <3>;
-+ bus-range = <0x00 0xff>;
-+ num-lanes = <2>;
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+
-+ ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,
-+ <0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>;
-+
-+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "msi0",
-+ "msi1",
-+ "msi2",
-+ "msi3",
-+ "msi4",
-+ "msi5",
-+ "msi6",
-+ "msi7";
-+
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 0x7>;
-+ interrupt-map = <0 0 0 1 &intc 0 0 189 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 2 &intc 0 0 190 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 3 &intc 0 0 191 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 4 &intc 0 0 192 IRQ_TYPE_LEVEL_HIGH>;
-+
-+ clocks = <&gcc GCC_PCIE3_AXI_M_CLK>,
-+ <&gcc GCC_PCIE3_AXI_S_CLK>,
-+ <&gcc GCC_PCIE3_AXI_S_BRIDGE_CLK>,
-+ <&gcc GCC_PCIE3_RCHNG_CLK>,
-+ <&gcc GCC_PCIE3_AHB_CLK>,
-+ <&gcc GCC_PCIE3_AUX_CLK>;
-+ clock-names = "axi_m",
-+ "axi_s",
-+ "axi_bridge",
-+ "rchng",
-+ "ahb",
-+ "aux";
-+
-+ resets = <&gcc GCC_PCIE3_PIPE_ARES>,
-+ <&gcc GCC_PCIE3_CORE_STICKY_ARES>,
-+ <&gcc GCC_PCIE3_AXI_S_STICKY_ARES>,
-+ <&gcc GCC_PCIE3_AXI_S_ARES>,
-+ <&gcc GCC_PCIE3_AXI_M_STICKY_ARES>,
-+ <&gcc GCC_PCIE3_AXI_M_ARES>,
-+ <&gcc GCC_PCIE3_AUX_ARES>,
-+ <&gcc GCC_PCIE3_AHB_ARES>;
-+ reset-names = "pipe",
-+ "sticky",
-+ "axi_s_sticky",
-+ "axi_s",
-+ "axi_m_sticky",
-+ "axi_m",
-+ "aux",
-+ "ahb";
-+
-+ phys = <&pcie3_phy>;
-+ phy-names = "pciephy";
-+ interconnects = <&gcc MASTER_ANOC_PCIE3 &gcc SLAVE_ANOC_PCIE3>,
-+ <&gcc MASTER_SNOC_PCIE3 &gcc SLAVE_SNOC_PCIE3>;
-+ interconnect-names = "pcie-mem", "cpu-pcie";
-+ status = "disabled";
-+ };
-+
-+ pcie2: pcie@20000000 {
-+ compatible = "qcom,pcie-ipq9574";
-+ reg = <0x20000000 0xf1d>,
-+ <0x20000f20 0xa8>,
-+ <0x20001000 0x1000>,
-+ <0x00088000 0x4000>,
-+ <0x20100000 0x1000>;
-+ reg-names = "dbi", "elbi", "atu", "parf", "config";
-+ device_type = "pci";
-+ linux,pci-domain = <2>;
-+ bus-range = <0x00 0xff>;
-+ num-lanes = <2>;
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+
-+ ranges = <0x01000000 0x0 0x00000000 0x20200000 0x0 0x100000>,
-+ <0x02000000 0x0 0x20300000 0x20300000 0x0 0x7d00000>;
-+
-+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "msi0",
-+ "msi1",
-+ "msi2",
-+ "msi3",
-+ "msi4",
-+ "msi5",
-+ "msi6",
-+ "msi7";
-+
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 0x7>;
-+ interrupt-map = <0 0 0 1 &intc 0 0 164 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 2 &intc 0 0 165 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 3 &intc 0 0 186 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 4 &intc 0 0 187 IRQ_TYPE_LEVEL_HIGH>;
-+
-+ clocks = <&gcc GCC_PCIE2_AXI_M_CLK>,
-+ <&gcc GCC_PCIE2_AXI_S_CLK>,
-+ <&gcc GCC_PCIE2_AXI_S_BRIDGE_CLK>,
-+ <&gcc GCC_PCIE2_RCHNG_CLK>,
-+ <&gcc GCC_PCIE2_AHB_CLK>,
-+ <&gcc GCC_PCIE2_AUX_CLK>;
-+ clock-names = "axi_m",
-+ "axi_s",
-+ "axi_bridge",
-+ "rchng",
-+ "ahb",
-+ "aux";
-+
-+ resets = <&gcc GCC_PCIE2_PIPE_ARES>,
-+ <&gcc GCC_PCIE2_CORE_STICKY_ARES>,
-+ <&gcc GCC_PCIE2_AXI_S_STICKY_ARES>,
-+ <&gcc GCC_PCIE2_AXI_S_ARES>,
-+ <&gcc GCC_PCIE2_AXI_M_STICKY_ARES>,
-+ <&gcc GCC_PCIE2_AXI_M_ARES>,
-+ <&gcc GCC_PCIE2_AUX_ARES>,
-+ <&gcc GCC_PCIE2_AHB_ARES>;
-+ reset-names = "pipe",
-+ "sticky",
-+ "axi_s_sticky",
-+ "axi_s",
-+ "axi_m_sticky",
-+ "axi_m",
-+ "aux",
-+ "ahb";
-+
-+ phys = <&pcie2_phy>;
-+ phy-names = "pciephy";
-+ interconnects = <&gcc MASTER_ANOC_PCIE2 &gcc SLAVE_ANOC_PCIE2>,
-+ <&gcc MASTER_SNOC_PCIE2 &gcc SLAVE_SNOC_PCIE2>;
-+ interconnect-names = "pcie-mem", "cpu-pcie";
-+ status = "disabled";
-+ };
-+
-+ pcie0: pci@28000000 {
-+ compatible = "qcom,pcie-ipq9574";
-+ reg = <0x28000000 0xf1d>,
-+ <0x28000f20 0xa8>,
-+ <0x28001000 0x1000>,
-+ <0x00080000 0x4000>,
-+ <0x28100000 0x1000>;
-+ reg-names = "dbi", "elbi", "atu", "parf", "config";
-+ device_type = "pci";
-+ linux,pci-domain = <0>;
-+ bus-range = <0x00 0xff>;
-+ num-lanes = <1>;
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+
-+ ranges = <0x01000000 0x0 0x00000000 0x28200000 0x0 0x100000>,
-+ <0x02000000 0x0 0x28300000 0x28300000 0x0 0x7d00000>;
-+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "msi0",
-+ "msi1",
-+ "msi2",
-+ "msi3",
-+ "msi4",
-+ "msi5",
-+ "msi6",
-+ "msi7";
-+
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 0x7>;
-+ interrupt-map = <0 0 0 1 &intc 0 0 75 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 2 &intc 0 0 78 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 3 &intc 0 0 79 IRQ_TYPE_LEVEL_HIGH>,
-+ <0 0 0 4 &intc 0 0 83 IRQ_TYPE_LEVEL_HIGH>;
-+
-+ clocks = <&gcc GCC_PCIE0_AXI_M_CLK>,
-+ <&gcc GCC_PCIE0_AXI_S_CLK>,
-+ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
-+ <&gcc GCC_PCIE0_RCHNG_CLK>,
-+ <&gcc GCC_PCIE0_AHB_CLK>,
-+ <&gcc GCC_PCIE0_AUX_CLK>;
-+ clock-names = "axi_m",
-+ "axi_s",
-+ "axi_bridge",
-+ "rchng",
-+ "ahb",
-+ "aux";
-+
-+ resets = <&gcc GCC_PCIE0_PIPE_ARES>,
-+ <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
-+ <&gcc GCC_PCIE0_AXI_S_STICKY_ARES>,
-+ <&gcc GCC_PCIE0_AXI_S_ARES>,
-+ <&gcc GCC_PCIE0_AXI_M_STICKY_ARES>,
-+ <&gcc GCC_PCIE0_AXI_M_ARES>,
-+ <&gcc GCC_PCIE0_AUX_ARES>,
-+ <&gcc GCC_PCIE0_AHB_ARES>;
-+ reset-names = "pipe",
-+ "sticky",
-+ "axi_s_sticky",
-+ "axi_s",
-+ "axi_m_sticky",
-+ "axi_m",
-+ "aux",
-+ "ahb";
-+
-+ phys = <&pcie0_phy>;
-+ phy-names = "pciephy";
-+ interconnects = <&gcc MASTER_ANOC_PCIE0 &gcc SLAVE_ANOC_PCIE0>,
-+ <&gcc MASTER_SNOC_PCIE0 &gcc SLAVE_SNOC_PCIE0>;
-+ interconnect-names = "pcie-mem", "cpu-pcie";
-+ status = "disabled";
-+ };
-+
- };
-
- thermal-zones {
+++ /dev/null
-From 438d05fb9be6bcd565e713c7e8d9ffb97e5f8d1e Mon Sep 17 00:00:00 2001
-From: devi priya <quic_devipriy@quicinc.com>
-Date: Thu, 1 Aug 2024 11:18:02 +0530
-Subject: [PATCH 2/2] arm64: dts: qcom: ipq9574: Enable PCIe PHYs and
- controllers
-
-Enable the PCIe controller and PHY nodes corresponding to RDP 433.
-
-Signed-off-by: devi priya <quic_devipriy@quicinc.com>
-Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
-Link: https://lore.kernel.org/r/20240801054803.3015572-4-quic_srichara@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 113 ++++++++++++++++++++
- 1 file changed, 113 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -8,6 +8,7 @@
-
- /dts-v1/;
-
-+#include <dt-bindings/gpio/gpio.h>
- #include "ipq9574-rdp-common.dtsi"
-
- / {
-@@ -15,6 +16,45 @@
- compatible = "qcom,ipq9574-ap-al02-c7", "qcom,ipq9574";
- };
-
-+&pcie1_phy {
-+ status = "okay";
-+};
-+
-+&pcie1 {
-+ pinctrl-0 = <&pcie1_default>;
-+ pinctrl-names = "default";
-+
-+ perst-gpios = <&tlmm 26 GPIO_ACTIVE_LOW>;
-+ wake-gpios = <&tlmm 27 GPIO_ACTIVE_LOW>;
-+ status = "okay";
-+};
-+
-+&pcie2_phy {
-+ status = "okay";
-+};
-+
-+&pcie2 {
-+ pinctrl-0 = <&pcie2_default>;
-+ pinctrl-names = "default";
-+
-+ perst-gpios = <&tlmm 29 GPIO_ACTIVE_LOW>;
-+ wake-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
-+ status = "okay";
-+};
-+
-+&pcie3_phy {
-+ status = "okay";
-+};
-+
-+&pcie3 {
-+ pinctrl-0 = <&pcie3_default>;
-+ pinctrl-names = "default";
-+
-+ perst-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
-+ wake-gpios = <&tlmm 33 GPIO_ACTIVE_LOW>;
-+ status = "okay";
-+};
-+
- &sdhc_1 {
- pinctrl-0 = <&sdc_default_state>;
- pinctrl-names = "default";
-@@ -28,6 +68,79 @@
- };
-
- &tlmm {
-+
-+ pcie1_default: pcie1-default-state {
-+ clkreq-n-pins {
-+ pins = "gpio25";
-+ function = "pcie1_clk";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+
-+ perst-n-pins {
-+ pins = "gpio26";
-+ function = "gpio";
-+ drive-strength = <8>;
-+ bias-pull-down;
-+ output-low;
-+ };
-+
-+ wake-n-pins {
-+ pins = "gpio27";
-+ function = "pcie1_wake";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+ };
-+
-+ pcie2_default: pcie2-default-state {
-+ clkreq-n-pins {
-+ pins = "gpio28";
-+ function = "pcie2_clk";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+
-+ perst-n-pins {
-+ pins = "gpio29";
-+ function = "gpio";
-+ drive-strength = <8>;
-+ bias-pull-down;
-+ output-low;
-+ };
-+
-+ wake-n-pins {
-+ pins = "gpio30";
-+ function = "pcie2_wake";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+ };
-+
-+ pcie3_default: pcie3-default-state {
-+ clkreq-n-pins {
-+ pins = "gpio31";
-+ function = "pcie3_clk";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+
-+ perst-n-pins {
-+ pins = "gpio32";
-+ function = "gpio";
-+ drive-strength = <8>;
-+ bias-pull-up;
-+ output-low;
-+ };
-+
-+ wake-n-pins {
-+ pins = "gpio33";
-+ function = "pcie3_wake";
-+ drive-strength = <6>;
-+ bias-pull-up;
-+ };
-+ };
-+
- sdc_default_state: sdc-default-state {
- clk-pins {
- pins = "gpio5";
+++ /dev/null
-From 980136d1c2b95644b96df6c7ec00ca5d7c87f37f Mon Sep 17 00:00:00 2001
-From: Krishna chaitanya chundru <quic_krichai@quicinc.com>
-Date: Wed, 19 Jun 2024 20:41:10 +0530
-Subject: [PATCH] PCI: qcom: Add ICC bandwidth vote for CPU to PCIe path
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-To access the host controller registers of the host controller and the
-endpoint BAR/config space, the CPU-PCIe ICC (interconnect) path should
-be voted otherwise it may lead to NoC (Network on chip) timeout.
-We are surviving because of other driver voting for this path.
-
-As there is less access on this path compared to PCIe to mem path
-add minimum vote i.e 1KBps bandwidth always which is sufficient enough
-to keep the path active and is recommended by HW team.
-
-During S2RAM (Suspend-to-RAM), the DBI access can happen very late (while
-disabling the boot CPU). So do not disable the CPU-PCIe interconnect path
-during S2RAM as that may lead to NoC error.
-
-Link: https://lore.kernel.org/linux-pci/20240619-opp_support-v15-1-aa769a2173a3@quicinc.com
-Signed-off-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
-Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
-Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
-Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
-Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
----
- drivers/pci/controller/dwc/pcie-qcom.c | 45 +++++++++++++++++++++++++++++++---
- 1 file changed, 41 insertions(+), 4 deletions(-)
-
---- a/drivers/pci/controller/dwc/pcie-qcom.c
-+++ b/drivers/pci/controller/dwc/pcie-qcom.c
-@@ -245,6 +245,7 @@ struct qcom_pcie {
- struct phy *phy;
- struct gpio_desc *reset;
- struct icc_path *icc_mem;
-+ struct icc_path *icc_cpu;
- const struct qcom_pcie_cfg *cfg;
- struct dentry *debugfs;
- bool suspended;
-@@ -1357,6 +1358,9 @@ static int qcom_pcie_icc_init(struct qco
- if (IS_ERR(pcie->icc_mem))
- return PTR_ERR(pcie->icc_mem);
-
-+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
-+ if (IS_ERR(pcie->icc_cpu))
-+ return PTR_ERR(pcie->icc_cpu);
- /*
- * Some Qualcomm platforms require interconnect bandwidth constraints
- * to be set before enabling interconnect clocks.
-@@ -1366,11 +1370,25 @@ static int qcom_pcie_icc_init(struct qco
- */
- ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
- if (ret) {
-- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
-+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
- ret);
- return ret;
- }
-
-+ /*
-+ * Since the CPU-PCIe path is only used for activities like register
-+ * access of the host controller and endpoint Config/BAR space access,
-+ * HW team has recommended to use a minimal bandwidth of 1KBps just to
-+ * keep the path active.
-+ */
-+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
-+ if (ret) {
-+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
-+ ret);
-+ icc_set_bw(pcie->icc_mem, 0, 0);
-+ return ret;
-+ }
-+
- return 0;
- }
-
-@@ -1411,7 +1429,7 @@ static void qcom_pcie_icc_update(struct
-
- ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
- if (ret) {
-- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
-+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
- ret);
- }
- }
-@@ -1573,7 +1591,7 @@ static int qcom_pcie_suspend_noirq(struc
- */
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
-- dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
-+ dev_err(dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n", ret);
- return ret;
- }
-
-@@ -1597,7 +1615,18 @@ static int qcom_pcie_suspend_noirq(struc
- pcie->suspended = true;
- }
-
-- return 0;
-+ /*
-+ * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM.
-+ * Because on some platforms, DBI access can happen very late during the
-+ * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC
-+ * error.
-+ */
-+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
-+ ret = icc_disable(pcie->icc_cpu);
-+ if (ret)
-+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
-+ }
-+ return ret;
- }
-
- static int qcom_pcie_resume_noirq(struct device *dev)
-@@ -1605,6 +1634,14 @@ static int qcom_pcie_resume_noirq(struct
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
-+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
-+ ret = icc_enable(pcie->icc_cpu);
-+ if (ret) {
-+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
-+ return ret;
-+ }
-+ }
-+
- if (pcie->suspended) {
- ret = qcom_pcie_host_init(&pcie->pci->pp);
- if (ret)
+++ /dev/null
-From c87d58bc7f831bf3d887e6ec846246cb673c2e50 Mon Sep 17 00:00:00 2001
-From: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
-Date: Thu, 13 Mar 2025 12:44:22 +0530
-Subject: [PATCH] arm64: dts: qcom: ipq9574: fix the msi interrupt numbers of
- pcie3
-
-The MSI interrupt numbers of the PCIe3 controller are incorrect. Due
-to this, the functional bring up of the QDSP6 processor on the PCIe
-endpoint has failed. Correct the MSI interrupt numbers to properly
-bring up the QDSP6 processor on the PCIe endpoint.
-
-Fixes: d80c7fbfa908 ("arm64: dts: qcom: ipq9574: Add PCIe PHYs and controller nodes")
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
-Link: https://lore.kernel.org/r/20250313071422.510-1-quic_mmanikan@quicinc.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -968,14 +968,14 @@
- ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,
- <0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>;
-
-- interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi0",
- "msi1",
- "msi2",
+++ /dev/null
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-To: <broonie@kernel.org>, <robh@kernel.org>, <krzk+dt@kernel.org>,
- <conor+dt@kernel.org>, <andersson@kernel.org>,
- <konradybcio@kernel.org>, <miquel.raynal@bootlin.com>,
- <richard@nod.at>, <vigneshr@ti.com>,
- <manivannan.sadhasivam@linaro.org>,
- <linux-arm-msm@vger.kernel.org>, <linux-spi@vger.kernel.org>,
- <devicetree@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
- <linux-mtd@lists.infradead.org>
-Cc: <quic_srichara@quicinc.com>, <quic_varada@quicinc.com>,
- <quic_mdalam@quicinc.com>
-Subject: [PATCH v14 7/8] arm64: dts: qcom: ipq9574: Add SPI nand support
-Date: Wed, 20 Nov 2024 14:45:05 +0530 [thread overview]
-Message-ID: <20241120091507.1404368-8-quic_mdalam@quicinc.com> (raw)
-In-Reply-To: <20241120091507.1404368-1-quic_mdalam@quicinc.com>
-
-Add SPI NAND support for ipq9574 SoC.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v14]
-
-* No change
-
-Change in [v13]
-
-* No change
-
-Change in [v12]
-
-* No change
-
-Change in [v11]
-
-* No change
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* No change
-
-Change in [v8]
-
-* No change
-
-Change in [v7]
-
-* No change
-
-Change in [v6]
-
-* No change
-
-Change in [v5]
-
-* No change
-
-Change in [v4]
-
-* No change
-
-Change in [v3]
-
-* Updated gpio number as per pin control driver
-
-* Fixed alignment issue
-
-Change in [v2]
-
-* Added initial enablement for spi-nand
-
-Change in [v1]
-
-* Posted as RFC patch for design review
-
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 43 +++++++++++++++++++
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 27 ++++++++++++
- 2 files changed, 70 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -95,6 +95,49 @@
- drive-strength = <8>;
- bias-disable;
- };
-+
-+ qpic_snand_default_state: qpic-snand-default-state {
-+ clock-pins {
-+ pins = "gpio5";
-+ function = "qspi_clk";
-+ drive-strength = <8>;
-+ bias-disable;
-+ };
-+
-+ cs-pins {
-+ pins = "gpio4";
-+ function = "qspi_cs";
-+ drive-strength = <8>;
-+ bias-disable;
-+ };
-+
-+ data-pins {
-+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
-+ function = "qspi_data";
-+ drive-strength = <8>;
-+ bias-disable;
-+ };
-+ };
-+};
-+
-+&qpic_bam {
-+ status = "okay";
-+};
-+
-+&qpic_nand {
-+ pinctrl-0 = <&qpic_snand_default_state>;
-+ pinctrl-names = "default";
-+ status = "okay";
-+
-+ flash@0 {
-+ compatible = "spi-nand";
-+ reg = <0>;
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ nand-ecc-engine = <&qpic_nand>;
-+ nand-ecc-strength = <4>;
-+ nand-ecc-step-size = <512>;
-+ };
- };
-
- &usb_0_dwc3 {
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -447,6 +447,33 @@
- reg = <0x01937000 0x21000>;
- };
-
-+ qpic_bam: dma-controller@7984000 {
-+ compatible = "qcom,bam-v1.7.0";
-+ reg = <0x7984000 0x1c000>;
-+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&gcc GCC_QPIC_AHB_CLK>;
-+ clock-names = "bam_clk";
-+ #dma-cells = <1>;
-+ qcom,ee = <0>;
-+ status = "disabled";
-+ };
-+
-+ qpic_nand: spi@79b0000 {
-+ compatible = "qcom,ipq9574-snand";
-+ reg = <0x79b0000 0x10000>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ clocks = <&gcc GCC_QPIC_CLK>,
-+ <&gcc GCC_QPIC_AHB_CLK>,
-+ <&gcc GCC_QPIC_IO_MACRO_CLK>;
-+ clock-names = "core", "aon", "iom";
-+ dmas = <&qpic_bam 0>,
-+ <&qpic_bam 1>,
-+ <&qpic_bam 2>;
-+ dma-names = "tx", "rx", "cmd";
-+ status = "disabled";
-+ };
-+
- sdhc_1: mmc@7804000 {
- compatible = "qcom,ipq9574-sdhci", "qcom,sdhci-msm-v5";
- reg = <0x07804000 0x1000>, <0x07805000 0x1000>;
+++ /dev/null
-From a28a71e2a4728ec4f1f4a6b28595b664a1a49e4b Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 7 Feb 2024 16:05:27 +0530
-Subject: [PATCH v10 8/8] arm64: dts: qcom: ipq9574: Disable eMMC node
-
-Disable eMMC node for rdp433, since rdp433
-default boot mode is norplusnand
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* No change
-
-Change in [v8]
-
-* No change
-
-Change in [v7]
-
-* No Change
-
-Change in [v6]
-
-* Updated commit message
-
-Change in [v5]
-
-* No Change
-
-Change in [v4]
-
-* No change
-
-Change in [v3]
-
-* Removed co-developed by
-
-Change in [v2]
-
-* Posted as initial eMMC disable patch
-
-Change in [v1]
-
-* This patch was not included in v1
-
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -64,7 +64,7 @@
- mmc-hs400-enhanced-strobe;
- max-frequency = <384000000>;
- bus-width = <8>;
-- status = "okay";
-+ status = "disabled";
- };
-
- &tlmm {
+++ /dev/null
-From 9e76817056937645205f23ee91e762d5cff5e848 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Mon, 29 Jan 2024 17:57:20 +0800
-Subject: [PATCH 01/50] dt-bindings: net: Document Qualcomm QCA8084 PHY package
-
-QCA8084 is quad PHY chip, which integrates 4 PHYs, 2 PCS
-interfaces (PCS0 and PCS1) and clock controller, which can
-also be integrated to the switch chip named as QCA8386.
-
-1. MDIO address of 4 PHYs, 2 PCS and 1 XPCS (PCS1 includes
- PCS and XPCS, PCS0 includes PCS) can be configured.
-2. The package mode of PHY is optionally configured for the
- interface mode of two PCSes working correctly.
-3. The package level clock and reset need to be initialized.
-4. The clock and reset per PHY device need to be initialized
- so that the PHY register can be accessed.
-
-Change-Id: Idb2338d2673152cbd3c57e95968faa59e9d4a80f
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../devicetree/bindings/net/qcom,qca8084.yaml | 198 ++++++++++++++++++
- include/dt-bindings/net/qcom,qca808x.h | 14 ++
- 2 files changed, 212 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/net/qcom,qca8084.yaml
- create mode 100644 include/dt-bindings/net/qcom,qca808x.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/net/qcom,qca8084.yaml
-@@ -0,0 +1,198 @@
-+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/net/qcom,qca8084.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Qualcomm QCA8084 Ethernet Quad PHY
-+
-+maintainers:
-+ - Luo Jie <quic_luoj@quicinc.com>
-+
-+description:
-+ Qualcomm QCA8084 is a four-port Ethernet transceiver, the
-+ Ethernet port supports link speed 10/100/1000/2500 Mbps.
-+ There are two PCSes (PCS0 and PCS1) integrated in the PHY
-+ package, PCS1 includes XPCS and PCS to support the interface
-+ mode 10G-QXGMII and SGMII, PCS0 includes a PCS to support the
-+ interface mode SGMII only. There is also a clock controller
-+ integrated in the PHY package. This four-port Ethernet
-+ transceiver can also be integrated to the switch chip named
-+ as QCA8386. The PHY package mode needs to be configured as the
-+ correct value to apply the interface mode of two PCSes as
-+ mentioned below.
-+
-+ QCA8084 expects an input reference clock 50 MHZ as the clock
-+ source of the integrated clock controller, the integrated
-+ clock controller supplies the clocks and resets to the
-+ integrated PHY, PCS and PHY package.
-+
-+ - |
-+ +--| |--+-------------------+--| |--+
-+ | PCS1 |<------------+---->| PCS0 |
-+ +-------+ | +-------+
-+ | | |
-+ Ref 50M clk +--------+ | |
-+ ------------>| | clk & rst | |
-+ GPIO Reset |QCA8K_CC+------------+ |
-+ ------------>| | | |
-+ +--------+ | |
-+ | V |
-+ +--------+--------+--------+--------+
-+ | PHY0 | PHY1 | PHY2 | PHY3 |
-+ +--------+--------+--------+--------+
-+
-+$ref: ethernet-phy-package.yaml#
-+
-+properties:
-+ compatible:
-+ const: qcom,qca8084-package
-+
-+ clocks:
-+ description: PHY package level initial common clocks, which are
-+ needed to be enabled after GPIO reset on the PHY package, these
-+ clocks are supplied from the PHY integrated clock controller
-+ (QCA8K-CC).
-+ items:
-+ - description: APB bridge clock
-+ - description: AHB clock
-+ - description: Security control clock
-+ - description: TLMM clock
-+ - description: TLMM AHB clock
-+ - description: CNOC AHB clock
-+ - description: MDIO AHB clock
-+
-+ clock-names:
-+ items:
-+ - const: apb_bridge
-+ - const: ahb
-+ - const: sec_ctrl_ahb
-+ - const: tlmm
-+ - const: tlmm_ahb
-+ - const: cnoc_ahb
-+ - const: mdio_ahb
-+
-+ resets:
-+ description: PHY package level initial common reset, which are
-+ needed to be deasserted after GPIO reset on the PHY package,
-+ this reset is provided by the PHY integrated clock controller
-+ to do PHY DSP reset.
-+ maxItems: 1
-+
-+ qcom,package-mode:
-+ description: |
-+ The package mode of PHY supports to be configured as 3 modes
-+ to apply the combinations of interface mode of two PCSes
-+ correctly. This value should use one of the values defined in
-+ dt-bindings/net/qcom,qca808x.h. The package mode 10G-QXGMII of
-+ Quad PHY is used by default.
-+
-+ package mode PCS1 PCS0
-+ phy mode (0) 10G-QXGMII for not used
-+ PHY0-PHY3
-+
-+ switch mode (1) SGMII for SGMII for
-+ switch MAC0 switch MAC5 (optional)
-+
-+ switch bypass MAC5 (2) SGMII for SGMII for
-+ switch MAC0 PHY3
-+ $ref: /schemas/types.yaml#/definitions/uint32
-+ enum: [0, 1, 2]
-+ default: 0
-+
-+ qcom,phy-addr-fixup:
-+ description: MDIO address for PHY0-PHY3, PCS0 and PCS1 including
-+ PCS and XPCS, which can be optionally customized by programming
-+ the security control register of PHY package. The hardware default
-+ MDIO address of PHY0-PHY3, PCS0 and PCS1 including PCS and XPCS is
-+ 0-6.
-+ $ref: /schemas/types.yaml#/definitions/uint32-array
-+ minItems: 7
-+ maxItems: 7
-+
-+patternProperties:
-+ ^ethernet-phy(@[a-f0-9]+)?$:
-+ $ref: ethernet-phy.yaml#
-+
-+ properties:
-+ compatible:
-+ const: ethernet-phy-id004d.d180
-+
-+ required:
-+ - compatible
-+ - reg
-+ - clocks
-+ - resets
-+
-+ unevaluatedProperties: false
-+
-+required:
-+ - compatible
-+ - clocks
-+ - clock-names
-+ - resets
-+
-+unevaluatedProperties: false
-+
-+examples:
-+ - |
-+ #include <dt-bindings/clock/qcom,qca8k-nsscc.h>
-+ #include <dt-bindings/net/qcom,qca808x.h>
-+ #include <dt-bindings/reset/qcom,qca8k-nsscc.h>
-+
-+ mdio {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ ethernet-phy-package@1 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ compatible = "qcom,qca8084-package";
-+ reg = <1>;
-+ clocks = <&qca8k_nsscc NSS_CC_APB_BRIDGE_CLK>,
-+ <&qca8k_nsscc NSS_CC_AHB_CLK>,
-+ <&qca8k_nsscc NSS_CC_SEC_CTRL_AHB_CLK>,
-+ <&qca8k_nsscc NSS_CC_TLMM_CLK>,
-+ <&qca8k_nsscc NSS_CC_TLMM_AHB_CLK>,
-+ <&qca8k_nsscc NSS_CC_CNOC_AHB_CLK>,
-+ <&qca8k_nsscc NSS_CC_MDIO_AHB_CLK>;
-+ clock-names = "apb_bridge",
-+ "ahb",
-+ "sec_ctrl_ahb",
-+ "tlmm",
-+ "tlmm_ahb",
-+ "cnoc_ahb",
-+ "mdio_ahb";
-+ resets = <&qca8k_nsscc NSS_CC_GEPHY_FULL_ARES>;
-+ qcom,package-mode = <QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC>;
-+ qcom,phy-addr-fixup = <1 2 3 4 5 6 7>;
-+
-+ ethernet-phy@1 {
-+ compatible = "ethernet-phy-id004d.d180";
-+ reg = <1>;
-+ clocks = <&qca8k_nsscc NSS_CC_GEPHY0_SYS_CLK>;
-+ resets = <&qca8k_nsscc NSS_CC_GEPHY0_SYS_ARES>;
-+ };
-+
-+ ethernet-phy@2 {
-+ compatible = "ethernet-phy-id004d.d180";
-+ reg = <2>;
-+ clocks = <&qca8k_nsscc NSS_CC_GEPHY1_SYS_CLK>;
-+ resets = <&qca8k_nsscc NSS_CC_GEPHY1_SYS_ARES>;
-+ };
-+
-+ ethernet-phy@3 {
-+ compatible = "ethernet-phy-id004d.d180";
-+ reg = <3>;
-+ clocks = <&qca8k_nsscc NSS_CC_GEPHY2_SYS_CLK>;
-+ resets = <&qca8k_nsscc NSS_CC_GEPHY2_SYS_ARES>;
-+ };
-+
-+ ethernet-phy@4 {
-+ compatible = "ethernet-phy-id004d.d180";
-+ reg = <4>;
-+ clocks = <&qca8k_nsscc NSS_CC_GEPHY3_SYS_CLK>;
-+ resets = <&qca8k_nsscc NSS_CC_GEPHY3_SYS_ARES>;
-+ };
-+ };
-+ };
---- /dev/null
-+++ b/include/dt-bindings/net/qcom,qca808x.h
-@@ -0,0 +1,14 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Device Tree constants for the Qualcomm QCA808X PHYs
-+ */
-+
-+#ifndef _DT_BINDINGS_QCOM_QCA808X_H
-+#define _DT_BINDINGS_QCOM_QCA808X_H
-+
-+/* PHY package modes of QCA8084 to apply the interface modes of two PCSes. */
-+#define QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED 0
-+#define QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC 1
-+#define QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_PHY 2
-+
-+#endif
+++ /dev/null
-From 9dec04efa81322029e210281b1753a2eb5279e27 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 6 Apr 2023 18:09:07 +0800
-Subject: [PATCH 02/50] net: phy: qca808x: Add QCA8084 ethernet phy support
-
-Add QCA8084 Quad-PHY support, which is a four-port PHY with
-maximum link capability of 2.5 Gbps. The features of each port
-are almost same as QCA8081. The slave seed and fast retrain
-configs are not needed for QCA8084. It includes two PCSes.
-
-PCS0 of QCA8084 supports the interface modes:
-PHY_INTERFACE_MODE_2500BASEX and PHY_INTERFACE_MODE_SGMII.
-
-PCS1 of QCA8084 supports the interface modes:
-PHY_INTERFACE_MODE_10G_QXGMII, PHY_INTERFACE_MODE_2500BASEX and
-PHY_INTERFACE_MODE_SGMII.
-
-The additional CDT configurations needed for QCA8084 compared
-with QCA8081.
-
-Change-Id: I12555fa70662682474ab4432204405b5e752fef6
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 62 ++++++++++++++++++++++++++++++++--
- 1 file changed, 60 insertions(+), 2 deletions(-)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -86,9 +86,16 @@
- #define QCA8081_PHY_FIFO_RSTN BIT(11)
-
- #define QCA8081_PHY_ID 0x004dd101
-+#define QCA8084_PHY_ID 0x004dd180
-+
-+#define QCA8084_MMD3_CDT_PULSE_CTRL 0x8075
-+#define QCA8084_CDT_PULSE_THRESH_VAL 0xa060
-+
-+#define QCA8084_MMD3_CDT_NEAR_CTRL 0x807f
-+#define QCA8084_CDT_NEAR_BYPASS BIT(15)
-
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
--MODULE_AUTHOR("Matus Ujhelyi");
-+MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-
- struct qca808x_priv {
-@@ -153,7 +160,9 @@ static bool qca808x_is_prefer_master(str
-
- static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
- {
-- return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-+ return phydev_id_compare(phydev, QCA8081_PHY_ID) &&
-+ linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
-+ phydev->supported);
- }
-
- static bool qca808x_is_1g_only(struct phy_device *phydev)
-@@ -273,6 +282,23 @@ static int qca808x_read_status(struct ph
- return ret;
-
- if (phydev->link) {
-+ /* There are two PCSes available for QCA8084, which support
-+ * the following interface modes.
-+ *
-+ * 1. PHY_INTERFACE_MODE_10G_QXGMII utilizes PCS1 for all
-+ * available 4 ports, which is for all link speeds.
-+ *
-+ * 2. PHY_INTERFACE_MODE_2500BASEX utilizes PCS0 for the
-+ * fourth port, which is only for the link speed 2500M same
-+ * as QCA8081.
-+ *
-+ * 3. PHY_INTERFACE_MODE_SGMII utilizes PCS0 for the fourth
-+ * port, which is for the link speed 10M, 100M and 1000M same
-+ * as QCA8081.
-+ */
-+ if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ return 0;
-+
- if (phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
-@@ -352,6 +378,18 @@ static int qca808x_cable_test_start(stru
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
-
-+ if (phydev_id_compare(phydev, QCA8084_PHY_ID)) {
-+ /* Adjust the positive and negative pulse thereshold of CDT. */
-+ phy_write_mmd(phydev, MDIO_MMD_PCS,
-+ QCA8084_MMD3_CDT_PULSE_CTRL,
-+ QCA8084_CDT_PULSE_THRESH_VAL);
-+
-+ /* Disable the near bypass of CDT. */
-+ phy_modify_mmd(phydev, MDIO_MMD_PCS,
-+ QCA8084_MMD3_CDT_NEAR_CTRL,
-+ QCA8084_CDT_NEAR_BYPASS, 0);
-+ }
-+
- return 0;
- }
-
-@@ -651,12 +689,32 @@ static struct phy_driver qca808x_driver[
- .led_hw_control_set = qca808x_led_hw_control_set,
- .led_hw_control_get = qca808x_led_hw_control_get,
- .led_polarity_set = qca808x_led_polarity_set,
-+}, {
-+ /* Qualcomm QCA8084 */
-+ PHY_ID_MATCH_MODEL(QCA8084_PHY_ID),
-+ .name = "Qualcomm QCA8084",
-+ .flags = PHY_POLL_CABLE_TEST,
-+ .config_intr = at803x_config_intr,
-+ .handle_interrupt = at803x_handle_interrupt,
-+ .get_tunable = at803x_get_tunable,
-+ .set_tunable = at803x_set_tunable,
-+ .set_wol = at803x_set_wol,
-+ .get_wol = at803x_get_wol,
-+ .get_features = qca808x_get_features,
-+ .config_aneg = qca808x_config_aneg,
-+ .suspend = genphy_suspend,
-+ .resume = genphy_resume,
-+ .read_status = qca808x_read_status,
-+ .soft_reset = qca808x_soft_reset,
-+ .cable_test_start = qca808x_cable_test_start,
-+ .cable_test_get_status = qca808x_cable_test_get_status,
- }, };
-
- module_phy_driver(qca808x_driver);
-
- static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
- { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
-+ { PHY_ID_MATCH_MODEL(QCA8084_PHY_ID) },
- { }
- };
-
+++ /dev/null
-From fd5ec7c0a9f7167baf377a4bbae72eda391df996 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 8 Nov 2023 16:18:02 +0800
-Subject: [PATCH 03/50] net: phy: qca808x: Add config_init function for QCA8084
-
-1. The ADC of QCA8084 PHY must be configured as edge inverted
-and falling whenever it is initialized or reset. In addition,
-the default MSE (Mean square error) threshold value is adjusted,
-which comes into play during link partner detection to detect
-the valid link signal.
-
-2. Add the possible interface modes.
- When QCA8084 works on the interface mode SGMII or 2500BASE-X, the
- interface mode can be switched according to the PHY link speed.
-
- When QCA8084 works on the 10G-QXGMII mode, which will be the only
- possible interface mode.
-
-Change-Id: I832c0d0b069e95cc411a8a7b680a5f60e1d6041a
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 38 ++++++++++++++++++++++++++++++++++
- 1 file changed, 38 insertions(+)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -94,6 +94,15 @@
- #define QCA8084_MMD3_CDT_NEAR_CTRL 0x807f
- #define QCA8084_CDT_NEAR_BYPASS BIT(15)
-
-+/* QCA8084 ADC clock edge */
-+#define QCA8084_ADC_CLK_SEL 0x8b80
-+#define QCA8084_ADC_CLK_SEL_ACLK GENMASK(7, 4)
-+#define QCA8084_ADC_CLK_SEL_ACLK_FALL 0xf
-+#define QCA8084_ADC_CLK_SEL_ACLK_RISE 0x0
-+
-+#define QCA8084_MSE_THRESHOLD 0x800a
-+#define QCA8084_MSE_THRESHOLD_2P5G_VAL 0x51c6
-+
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-@@ -660,6 +669,34 @@ static int qca808x_led_polarity_set(stru
- active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
- }
-
-+static int qca8084_config_init(struct phy_device *phydev)
-+{
-+ int ret;
-+
-+ if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
-+ phydev->possible_interfaces);
-+ else
-+ qca808x_fill_possible_interfaces(phydev);
-+
-+ /* Configure the ADC to convert the signal using falling edge
-+ * instead of the default rising edge.
-+ */
-+ ret = at803x_debug_reg_mask(phydev, QCA8084_ADC_CLK_SEL,
-+ QCA8084_ADC_CLK_SEL_ACLK,
-+ FIELD_PREP(QCA8084_ADC_CLK_SEL_ACLK,
-+ QCA8084_ADC_CLK_SEL_ACLK_FALL));
-+ if (ret < 0)
-+ return ret;
-+
-+ /* Adjust MSE threshold value to avoid link issue with
-+ * some link partner.
-+ */
-+ return phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
-+ QCA8084_MSE_THRESHOLD,
-+ QCA8084_MSE_THRESHOLD_2P5G_VAL);
-+}
-+
- static struct phy_driver qca808x_driver[] = {
- {
- /* Qualcomm QCA8081 */
-@@ -708,6 +745,7 @@ static struct phy_driver qca808x_driver[
- .soft_reset = qca808x_soft_reset,
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
-+ .config_init = qca8084_config_init,
- }, };
-
- module_phy_driver(qca808x_driver);
+++ /dev/null
-From d9b391e7b695b7de04c4363b5ec9ffaaed387353 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 8 Nov 2023 18:01:14 +0800
-Subject: [PATCH 04/50] net: phy: qca808x: Add link_change_notify function for
- QCA8084
-
-When the link is changed, QCA8084 needs to do the fifo reset and
-adjust the IPG level for the 10G-QXGMII link on the speed 1000M.
-
-Change-Id: I21de802c78496fb95f1c5119fe3894c9fdebbd65
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 52 ++++++++++++++++++++++++++++++++++
- 1 file changed, 52 insertions(+)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -103,6 +103,14 @@
- #define QCA8084_MSE_THRESHOLD 0x800a
- #define QCA8084_MSE_THRESHOLD_2P5G_VAL 0x51c6
-
-+/* QCA8084 FIFO reset control */
-+#define QCA8084_FIFO_CONTROL 0x19
-+#define QCA8084_FIFO_MAC_2_PHY BIT(1)
-+#define QCA8084_FIFO_PHY_2_MAC BIT(0)
-+
-+#define QCA8084_MMD7_IPG_OP 0x901d
-+#define QCA8084_IPG_10_TO_11_EN BIT(0)
-+
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-@@ -697,6 +705,49 @@ static int qca8084_config_init(struct ph
- QCA8084_MSE_THRESHOLD_2P5G_VAL);
- }
-
-+static void qca8084_link_change_notify(struct phy_device *phydev)
-+{
-+ int ret;
-+
-+ /* Assert the FIFO between PHY and MAC. */
-+ ret = phy_modify(phydev, QCA8084_FIFO_CONTROL,
-+ QCA8084_FIFO_MAC_2_PHY | QCA8084_FIFO_PHY_2_MAC,
-+ 0);
-+ if (ret) {
-+ phydev_err(phydev, "Asserting PHY FIFO failed\n");
-+ return;
-+ }
-+
-+ /* If the PHY is in 10G_QXGMII mode, the FIFO needs to be kept in
-+ * reset state when link is down, otherwise the FIFO needs to be
-+ * de-asserted after waiting 50 ms to make the assert completed.
-+ */
-+ if (phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII ||
-+ phydev->link) {
-+ msleep(50);
-+
-+ /* Deassert the FIFO between PHY and MAC. */
-+ ret = phy_modify(phydev, QCA8084_FIFO_CONTROL,
-+ QCA8084_FIFO_MAC_2_PHY |
-+ QCA8084_FIFO_PHY_2_MAC,
-+ QCA8084_FIFO_MAC_2_PHY |
-+ QCA8084_FIFO_PHY_2_MAC);
-+ if (ret) {
-+ phydev_err(phydev, "De-asserting PHY FIFO failed\n");
-+ return;
-+ }
-+ }
-+
-+ /* Enable IPG level 10 to 11 tuning for link speed 1000M in the
-+ * 10G_QXGMII mode.
-+ */
-+ if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ phy_modify_mmd(phydev, MDIO_MMD_AN, QCA8084_MMD7_IPG_OP,
-+ QCA8084_IPG_10_TO_11_EN,
-+ phydev->speed == SPEED_1000 ?
-+ QCA8084_IPG_10_TO_11_EN : 0);
-+}
-+
- static struct phy_driver qca808x_driver[] = {
- {
- /* Qualcomm QCA8081 */
-@@ -746,6 +797,7 @@ static struct phy_driver qca808x_driver[
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
- .config_init = qca8084_config_init,
-+ .link_change_notify = qca8084_link_change_notify,
- }, };
-
- module_phy_driver(qca808x_driver);
+++ /dev/null
-From 9443d85d8f3e397b025700251516e248fc4e37c0 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 29 Nov 2023 15:21:22 +0800
-Subject: [PATCH 05/50] net: phy: qca808x: Add register access support routines
- for QCA8084
-
-QCA8084 integrates clock controller and security control modules
-besides of the PHY and PCS. The 32bit registers in these modules
-are accessed using special MDIO sequences to read or write these
-registers.
-
-The MDIO address of PHY and PCS are configured by writing to the
-security control register. The package mode for QCA8084 is also
-configured in a similar manner.
-
-Change-Id: I9317307ef9bbc738a6adcbc3ea1be8e6528d711e
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 88 ++++++++++++++++++++++++++++++++++
- 1 file changed, 88 insertions(+)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -111,6 +111,22 @@
- #define QCA8084_MMD7_IPG_OP 0x901d
- #define QCA8084_IPG_10_TO_11_EN BIT(0)
-
-+/* QCA8084 includes secure control module, which supports customizing the
-+ * MDIO address of PHY device and PCS device and configuring package mode
-+ * for the interface mode of PCS. The register of secure control is accessed
-+ * by MDIO bus with the special MDIO sequences, where the 32 bits register
-+ * address is split into 3 MDIO operations with 16 bits address.
-+ */
-+#define QCA8084_HIGH_ADDR_PREFIX 0x18
-+#define QCA8084_LOW_ADDR_PREFIX 0x10
-+
-+/* Bottom two bits of REG must be zero */
-+#define QCA8084_MII_REG_MASK GENMASK(4, 0)
-+#define QCA8084_MII_PHY_ADDR_MASK GENMASK(7, 5)
-+#define QCA8084_MII_PAGE_MASK GENMASK(23, 8)
-+#define QCA8084_MII_SW_ADDR_MASK GENMASK(31, 24)
-+#define QCA8084_MII_REG_DATA_UPPER_16_BITS BIT(1)
-+
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-@@ -119,6 +135,78 @@ struct qca808x_priv {
- int led_polarity_mode;
- };
-
-+static int __qca8084_set_page(struct mii_bus *bus, u16 sw_addr, u16 page)
-+{
-+ return __mdiobus_write(bus, QCA8084_HIGH_ADDR_PREFIX | (sw_addr >> 5),
-+ sw_addr & 0x1f, page);
-+}
-+
-+static int __qca8084_mii_read(struct mii_bus *bus, u16 addr, u16 reg, u32 *val)
-+{
-+ int ret, data;
-+
-+ ret = __mdiobus_read(bus, addr, reg);
-+ if (ret < 0)
-+ return ret;
-+
-+ data = ret;
-+ ret = __mdiobus_read(bus, addr,
-+ reg | QCA8084_MII_REG_DATA_UPPER_16_BITS);
-+ if (ret < 0)
-+ return ret;
-+
-+ *val = data | ret << 16;
-+
-+ return 0;
-+}
-+
-+static int __qca8084_mii_write(struct mii_bus *bus, u16 addr, u16 reg, u32 val)
-+{
-+ int ret;
-+
-+ ret = __mdiobus_write(bus, addr, reg, lower_16_bits(val));
-+ if (!ret)
-+ ret = __mdiobus_write(bus, addr,
-+ reg | QCA8084_MII_REG_DATA_UPPER_16_BITS,
-+ upper_16_bits(val));
-+
-+ return ret;
-+}
-+
-+static int qca8084_mii_modify(struct phy_device *phydev, u32 regaddr,
-+ u32 clear, u32 set)
-+{
-+ u16 reg, addr, page, sw_addr;
-+ struct mii_bus *bus;
-+ u32 val;
-+ int ret;
-+
-+ bus = phydev->mdio.bus;
-+ mutex_lock(&bus->mdio_lock);
-+
-+ reg = FIELD_GET(QCA8084_MII_REG_MASK, regaddr);
-+ addr = FIELD_GET(QCA8084_MII_PHY_ADDR_MASK, regaddr);
-+ page = FIELD_GET(QCA8084_MII_PAGE_MASK, regaddr);
-+ sw_addr = FIELD_GET(QCA8084_MII_SW_ADDR_MASK, regaddr);
-+
-+ ret = __qca8084_set_page(bus, sw_addr, page);
-+ if (ret < 0)
-+ goto qca8084_mii_modify_exit;
-+
-+ ret = __qca8084_mii_read(bus, QCA8084_LOW_ADDR_PREFIX | addr,
-+ reg, &val);
-+ if (ret < 0)
-+ goto qca8084_mii_modify_exit;
-+
-+ val &= ~clear;
-+ val |= set;
-+ ret = __qca8084_mii_write(bus, QCA8084_LOW_ADDR_PREFIX | addr,
-+ reg, val);
-+qca8084_mii_modify_exit:
-+ mutex_unlock(&bus->mdio_lock);
-+ return ret;
-+};
-+
- static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
- {
- int ret;
+++ /dev/null
-From 9d0e22124d6f3ca901626dd5537b36c7c0c97812 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Mon, 29 Jan 2024 10:51:38 +0800
-Subject: [PATCH 06/50] net: phy: qca808x: Add QCA8084 probe function
-
-Add the PHY package probe function. The MDIO slave address of
-PHY, PCS and XPCS can be optionally customized by configuring
-the PHY package level register.
-
-In addition, enable system clock of PHY and de-assert PHY in
-the probe function so that the register of PHY device can be
-accessed, and the features of PHY can be acquired.
-
-Change-Id: I2251b9c5c398a21a4ef547a727189a934ad3a44c
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 91 ++++++++++++++++++++++++++++++++++
- 1 file changed, 91 insertions(+)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -2,6 +2,8 @@
-
- #include <linux/phy.h>
- #include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/clk.h>
-
- #include "qcom.h"
-
-@@ -127,6 +129,21 @@
- #define QCA8084_MII_SW_ADDR_MASK GENMASK(31, 24)
- #define QCA8084_MII_REG_DATA_UPPER_16_BITS BIT(1)
-
-+/* QCA8084 integrates 4 PHYs, PCS0 and PCS1(includes PCS and XPCS). */
-+#define QCA8084_MDIO_DEVICE_NUM 7
-+
-+#define QCA8084_PCS_CFG 0xc90f014
-+#define QCA8084_PCS_ADDR0_MASK GENMASK(4, 0)
-+#define QCA8084_PCS_ADDR1_MASK GENMASK(9, 5)
-+#define QCA8084_PCS_ADDR2_MASK GENMASK(14, 10)
-+
-+#define QCA8084_EPHY_CFG 0xc90f018
-+#define QCA8084_EPHY_ADDR0_MASK GENMASK(4, 0)
-+#define QCA8084_EPHY_ADDR1_MASK GENMASK(9, 5)
-+#define QCA8084_EPHY_ADDR2_MASK GENMASK(14, 10)
-+#define QCA8084_EPHY_ADDR3_MASK GENMASK(19, 15)
-+#define QCA8084_EPHY_LDO_EN GENMASK(21, 20)
-+
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-@@ -836,6 +853,79 @@ static void qca8084_link_change_notify(s
- QCA8084_IPG_10_TO_11_EN : 0);
- }
-
-+static int qca8084_phy_package_probe_once(struct phy_device *phydev)
-+{
-+ int addr[QCA8084_MDIO_DEVICE_NUM] = {0, 1, 2, 3, 4, 5, 6};
-+ struct phy_package_shared *shared = phydev->shared;
-+ int ret, clear, set;
-+
-+ /* Program the MDIO address of PHY and PCS optionally, the MDIO
-+ * address 0-6 is used for PHY and PCS MDIO devices by default.
-+ */
-+ ret = of_property_read_u32_array(shared->np,
-+ "qcom,phy-addr-fixup",
-+ addr, ARRAY_SIZE(addr));
-+ if (ret && ret != -EINVAL)
-+ return ret;
-+
-+ /* Configure the MDIO addresses for the four PHY devices. */
-+ clear = QCA8084_EPHY_ADDR0_MASK | QCA8084_EPHY_ADDR1_MASK |
-+ QCA8084_EPHY_ADDR2_MASK | QCA8084_EPHY_ADDR3_MASK;
-+ set = FIELD_PREP(QCA8084_EPHY_ADDR0_MASK, addr[0]);
-+ set |= FIELD_PREP(QCA8084_EPHY_ADDR1_MASK, addr[1]);
-+ set |= FIELD_PREP(QCA8084_EPHY_ADDR2_MASK, addr[2]);
-+ set |= FIELD_PREP(QCA8084_EPHY_ADDR3_MASK, addr[3]);
-+
-+ ret = qca8084_mii_modify(phydev, QCA8084_EPHY_CFG, clear, set);
-+ if (ret)
-+ return ret;
-+
-+ /* Configure the MDIO addresses for PCS0 and PCS1 including
-+ * PCS and XPCS.
-+ */
-+ clear = QCA8084_PCS_ADDR0_MASK | QCA8084_PCS_ADDR1_MASK |
-+ QCA8084_PCS_ADDR2_MASK;
-+ set = FIELD_PREP(QCA8084_PCS_ADDR0_MASK, addr[4]);
-+ set |= FIELD_PREP(QCA8084_PCS_ADDR1_MASK, addr[5]);
-+ set |= FIELD_PREP(QCA8084_PCS_ADDR2_MASK, addr[6]);
-+
-+ return qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
-+}
-+
-+static int qca8084_probe(struct phy_device *phydev)
-+{
-+ struct device *dev = &phydev->mdio.dev;
-+ struct reset_control *rstc;
-+ struct clk *clk;
-+ int ret;
-+
-+ ret = devm_of_phy_package_join(dev, phydev, 0);
-+ if (ret)
-+ return ret;
-+
-+ if (phy_package_probe_once(phydev)) {
-+ ret = qca8084_phy_package_probe_once(phydev);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Enable clock of PHY device, so that the PHY register
-+ * can be accessed to get PHY features.
-+ */
-+ clk = devm_clk_get_enabled(dev, NULL);
-+ if (IS_ERR(clk))
-+ return dev_err_probe(dev, PTR_ERR(clk),
-+ "Enable PHY clock failed\n");
-+
-+ /* De-assert PHY reset after the clock of PHY enabled. */
-+ rstc = devm_reset_control_get_exclusive(dev, NULL);
-+ if (IS_ERR(rstc))
-+ return dev_err_probe(dev, PTR_ERR(rstc),
-+ "Get PHY reset failed\n");
-+
-+ return reset_control_deassert(rstc);
-+}
-+
- static struct phy_driver qca808x_driver[] = {
- {
- /* Qualcomm QCA8081 */
-@@ -886,6 +976,7 @@ static struct phy_driver qca808x_driver[
- .cable_test_get_status = qca808x_cable_test_get_status,
- .config_init = qca8084_config_init,
- .link_change_notify = qca8084_link_change_notify,
-+ .probe = qca8084_probe,
- }, };
-
- module_phy_driver(qca808x_driver);
+++ /dev/null
-From 324c5b908a5294390ed9659a6439758cb20ecd61 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 9 Apr 2024 16:30:55 +0800
-Subject: [PATCH 07/50] net: phy: qca808x: Add package clocks and resets for
- QCA8084
-
-Parse the PHY package clocks from the PHY package DTS node.
-These package level clocks will be enabled in the PHY package
-init function.
-
-Deassert PHY package reset, which is necessary for accessing
-the PHY registers.
-
-Change-Id: I254d0aa0a1155d3618c6f1fc7d7a5b6ecadccbaa
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 67 ++++++++++++++++++++++++++++++++--
- 1 file changed, 64 insertions(+), 3 deletions(-)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -4,6 +4,7 @@
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/clk.h>
-+#include <linux/reset.h>
-
- #include "qcom.h"
-
-@@ -148,10 +149,35 @@ MODULE_DESCRIPTION("Qualcomm Atheros QCA
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-
-+enum {
-+ APB_BRIDGE_CLK,
-+ AHB_CLK,
-+ SEC_CTRL_AHB_CLK,
-+ TLMM_CLK,
-+ TLMM_AHB_CLK,
-+ CNOC_AHB_CLK,
-+ MDIO_AHB_CLK,
-+ PACKAGE_CLK_MAX
-+};
-+
- struct qca808x_priv {
- int led_polarity_mode;
- };
-
-+struct qca808x_shared_priv {
-+ struct clk *clk[PACKAGE_CLK_MAX];
-+};
-+
-+static const char *const qca8084_package_clk_name[PACKAGE_CLK_MAX] = {
-+ [APB_BRIDGE_CLK] = "apb_bridge",
-+ [AHB_CLK] = "ahb",
-+ [SEC_CTRL_AHB_CLK] = "sec_ctrl_ahb",
-+ [TLMM_CLK] = "tlmm",
-+ [TLMM_AHB_CLK] = "tlmm_ahb",
-+ [CNOC_AHB_CLK] = "cnoc_ahb",
-+ [MDIO_AHB_CLK] = "mdio_ahb",
-+};
-+
- static int __qca8084_set_page(struct mii_bus *bus, u16 sw_addr, u16 page)
- {
- return __mdiobus_write(bus, QCA8084_HIGH_ADDR_PREFIX | (sw_addr >> 5),
-@@ -853,11 +879,24 @@ static void qca8084_link_change_notify(s
- QCA8084_IPG_10_TO_11_EN : 0);
- }
-
-+/* QCA8084 is a four-port PHY, which integrates the clock controller,
-+ * 4 PHY devices and 2 PCS interfaces (PCS0 and PCS1). PCS1 includes
-+ * XPCS and PCS to support 10G-QXGMII and SGMII. PCS0 includes one PCS
-+ * to support SGMII.
-+ *
-+ * The clocks and resets are sourced from the integrated clock controller
-+ * of the PHY package. This integrated clock controller is driven by a
-+ * QCA8K clock provider that supplies the clocks and resets to the four
-+ * PHYs, PCS and PHY package.
-+ */
- static int qca8084_phy_package_probe_once(struct phy_device *phydev)
- {
- int addr[QCA8084_MDIO_DEVICE_NUM] = {0, 1, 2, 3, 4, 5, 6};
- struct phy_package_shared *shared = phydev->shared;
-- int ret, clear, set;
-+ struct qca808x_shared_priv *shared_priv;
-+ struct reset_control *rstc;
-+ int i, ret, clear, set;
-+ struct clk *clk;
-
- /* Program the MDIO address of PHY and PCS optionally, the MDIO
- * address 0-6 is used for PHY and PCS MDIO devices by default.
-@@ -889,17 +928,39 @@ static int qca8084_phy_package_probe_onc
- set |= FIELD_PREP(QCA8084_PCS_ADDR1_MASK, addr[5]);
- set |= FIELD_PREP(QCA8084_PCS_ADDR2_MASK, addr[6]);
-
-- return qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
-+ ret = qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
-+ if (ret)
-+ return ret;
-+
-+ shared_priv = shared->priv;
-+ for (i = 0; i < ARRAY_SIZE(qca8084_package_clk_name); i++) {
-+ clk = of_clk_get_by_name(shared->np,
-+ qca8084_package_clk_name[i]);
-+ if (IS_ERR(clk))
-+ return dev_err_probe(&phydev->mdio.dev, PTR_ERR(clk),
-+ "package clock %s not ready\n",
-+ qca8084_package_clk_name[i]);
-+ shared_priv->clk[i] = clk;
-+ }
-+
-+ rstc = of_reset_control_get_exclusive(shared->np, NULL);
-+ if (IS_ERR(rstc))
-+ return dev_err_probe(&phydev->mdio.dev, PTR_ERR(rstc),
-+ "package reset not ready\n");
-+
-+ /* Deassert PHY package. */
-+ return reset_control_deassert(rstc);
- }
-
- static int qca8084_probe(struct phy_device *phydev)
- {
-+ struct qca808x_shared_priv *shared_priv;
- struct device *dev = &phydev->mdio.dev;
- struct reset_control *rstc;
- struct clk *clk;
- int ret;
-
-- ret = devm_of_phy_package_join(dev, phydev, 0);
-+ ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
- if (ret)
- return ret;
-
+++ /dev/null
-From 392a648b7b0324d03e6f6a7b326e33136d79b134 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 25 Jan 2024 17:13:24 +0800
-Subject: [PATCH 08/50] net: phy: qca808x: Add QCA8084 package init function
-
-The package mode of PHY is configured for the interface mode of two
-PCSes working correctly.
-
-The PHY package level clocks are enabled and their rates configured.
-
-Change-Id: I63d4b22d2a70ee713cc6a6818b0f3c7aa098a5f5
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/phy/qcom/qca808x.c | 115 +++++++++++++++++++++++++++++++++
- 1 file changed, 115 insertions(+)
-
---- a/drivers/net/phy/qcom/qca808x.c
-+++ b/drivers/net/phy/qcom/qca808x.c
-@@ -1,5 +1,6 @@
- // SPDX-License-Identifier: GPL-2.0+
-
-+#include <dt-bindings/net/qcom,qca808x.h>
- #include <linux/phy.h>
- #include <linux/module.h>
- #include <linux/of.h>
-@@ -145,6 +146,13 @@
- #define QCA8084_EPHY_ADDR3_MASK GENMASK(19, 15)
- #define QCA8084_EPHY_LDO_EN GENMASK(21, 20)
-
-+#define QCA8084_WORK_MODE_CFG 0xc90f030
-+#define QCA8084_WORK_MODE_MASK GENMASK(5, 0)
-+#define QCA8084_WORK_MODE_QXGMII (BIT(5) | GENMASK(3, 0))
-+#define QCA8084_WORK_MODE_QXGMII_PORT4_SGMII (BIT(5) | GENMASK(2, 0))
-+#define QCA8084_WORK_MODE_SWITCH BIT(4)
-+#define QCA8084_WORK_MODE_SWITCH_PORT4_SGMII BIT(5)
-+
- MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
- MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
- MODULE_LICENSE("GPL");
-@@ -165,6 +173,7 @@ struct qca808x_priv {
- };
-
- struct qca808x_shared_priv {
-+ int package_mode;
- struct clk *clk[PACKAGE_CLK_MAX];
- };
-
-@@ -808,10 +817,107 @@ static int qca808x_led_polarity_set(stru
- active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
- }
-
-+static int qca8084_package_clock_init(struct qca808x_shared_priv *shared_priv)
-+{
-+ int ret;
-+
-+ /* Configure clock rate 312.5MHZ for the PHY package
-+ * APB bridge clock tree.
-+ */
-+ ret = clk_set_rate(shared_priv->clk[APB_BRIDGE_CLK], 312500000);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[APB_BRIDGE_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ /* Configure clock rate 104.17MHZ for the PHY package
-+ * AHB clock tree.
-+ */
-+ ret = clk_set_rate(shared_priv->clk[AHB_CLK], 104170000);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[AHB_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[SEC_CTRL_AHB_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[TLMM_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[TLMM_AHB_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(shared_priv->clk[CNOC_AHB_CLK]);
-+ if (ret)
-+ return ret;
-+
-+ return clk_prepare_enable(shared_priv->clk[MDIO_AHB_CLK]);
-+}
-+
-+static int qca8084_phy_package_config_init_once(struct phy_device *phydev)
-+{
-+ struct phy_package_shared *shared = phydev->shared;
-+ struct qca808x_shared_priv *shared_priv;
-+ int ret, mode;
-+
-+ shared_priv = shared->priv;
-+ switch (shared_priv->package_mode) {
-+ case QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED:
-+ mode = QCA8084_WORK_MODE_QXGMII;
-+ break;
-+ case QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC:
-+ mode = QCA8084_WORK_MODE_SWITCH;
-+ break;
-+ case QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_PHY:
-+ mode = QCA8084_WORK_MODE_SWITCH_PORT4_SGMII;
-+ break;
-+ default:
-+ phydev_err(phydev, "Invalid qcom,package-mode %d\n",
-+ shared_priv->package_mode);
-+ return -EINVAL;
-+ }
-+
-+ ret = qca8084_mii_modify(phydev, QCA8084_WORK_MODE_CFG,
-+ QCA8084_WORK_MODE_MASK,
-+ FIELD_PREP(QCA8084_WORK_MODE_MASK, mode));
-+ if (ret)
-+ return ret;
-+
-+ /* Initialize the PHY package clock and reset, which is the
-+ * necessary config sequence after GPIO reset on the PHY package.
-+ */
-+ ret = qca8084_package_clock_init(shared_priv);
-+ if (ret)
-+ return ret;
-+
-+ /* Enable efuse loading into analog circuit */
-+ ret = qca8084_mii_modify(phydev, QCA8084_EPHY_CFG,
-+ QCA8084_EPHY_LDO_EN, 0);
-+ if (ret)
-+ return ret;
-+
-+ usleep_range(10000, 11000);
-+ return ret;
-+}
-+
- static int qca8084_config_init(struct phy_device *phydev)
- {
- int ret;
-
-+ if (phy_package_init_once(phydev)) {
-+ ret = qca8084_phy_package_config_init_once(phydev);
-+ if (ret)
-+ return ret;
-+ }
-+
- if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
- __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
- phydev->possible_interfaces);
-@@ -948,6 +1054,15 @@ static int qca8084_phy_package_probe_onc
- return dev_err_probe(&phydev->mdio.dev, PTR_ERR(rstc),
- "package reset not ready\n");
-
-+ /* The package mode 10G-QXGMII of PCS1 is used for Quad PHY and
-+ * PCS0 is unused by default.
-+ */
-+ shared_priv->package_mode = QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED;
-+ ret = of_property_read_u32(shared->np, "qcom,package-mode",
-+ &shared_priv->package_mode);
-+ if (ret && ret != -EINVAL)
-+ return ret;
-+
- /* Deassert PHY package. */
- return reset_control_deassert(rstc);
- }
+++ /dev/null
-From 5e4192952cfb2110aaba1b03a3c66c84d74a27db Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Mon, 29 Jan 2024 11:39:36 +0800
-Subject: [PATCH 11/50] net: pcs: Add driver for Qualcomm IPQ UNIPHY PCS
-
-The UNIPHY hardware block in Qualcomm's IPQ SoC based boards enables
-PCS and XPCS functions, and helps in interfacing the Ethernet MAC in
-IPQ SoC to external PHYs.
-
-This patch adds the PCS driver support for the UNIPHY hardware used in
-IPQ SoC based boards. Support for SGMII/QSGMII/PSGMII and USXGMII
-interface modes are added in the driver.
-
-Change-Id: Id2c8f993f121098f7b02186b53770b75bb539a93
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- MAINTAINERS | 8 +
- drivers/net/pcs/Kconfig | 10 +
- drivers/net/pcs/Makefile | 1 +
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 943 ++++++++++++++++++++++++
- include/linux/pcs/pcs-qcom-ipq-uniphy.h | 13 +
- 5 files changed, 975 insertions(+)
- create mode 100644 drivers/net/pcs/pcs-qcom-ipq-uniphy.c
- create mode 100644 include/linux/pcs/pcs-qcom-ipq-uniphy.h
-
-# diff --git a/MAINTAINERS b/MAINTAINERS
-# index 8836b2200acf..1940990ae342 100644
-# --- a/MAINTAINERS
-# +++ b/MAINTAINERS
-# @@ -18900,6 +18900,14 @@ S: Maintained
-# F: Documentation/devicetree/bindings/regulator/vqmmc-ipq4019-regulator.yaml
-# F: drivers/regulator/vqmmc-ipq4019-regulator.c
-
-# +QUALCOMM IPQ Ethernet UNIPHY PCS DRIVER
-# +M: Lei Wei <quic_leiwei@quicinc.com>
-# +L: netdev@vger.kernel.org
-# +S: Supported
-# +F: Documentation/devicetree/bindings/net/pcs/qcom,ipq-uniphy.yaml
-# +F: drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-# +F: include/linux/pcs/pcs-qcom-ipq-uniphy.h
-# +
-# QUALCOMM NAND CONTROLLER DRIVER
-# M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-# L: linux-mtd@lists.infradead.org
---- a/drivers/net/pcs/Kconfig
-+++ b/drivers/net/pcs/Kconfig
-@@ -44,4 +44,14 @@ config PCS_RZN1_MIIC
- on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
- pass-through mode for MII.
-
-+config PCS_QCOM_IPQ_UNIPHY
-+ tristate "Qualcomm IPQ UNIPHY PCS driver"
-+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
-+ depends on HAS_IOMEM
-+ help
-+ This module provides PCS driver for Qualcomm IPQ UNIPHY that is
-+ available on Qualcomm IPQ SoCs. The UNIPHY provides both PCS and XPCS
-+ functions to support different interface modes for MAC to PHY connections.
-+ These modes help to support various combination of ethernet switch/PHY on
-+ IPQ SoC based boards.
- endmenu
---- a/drivers/net/pcs/Makefile
-+++ b/drivers/net/pcs/Makefile
-@@ -8,3 +8,4 @@ obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
- obj-$(CONFIG_PCS_MTK_LYNXI) += pcs-mtk-lynxi.o
- obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
- obj-$(CONFIG_PCS_MTK_USXGMII) += pcs-mtk-usxgmii.o
-+obj-$(CONFIG_PCS_QCOM_IPQ_UNIPHY) += pcs-qcom-ipq-uniphy.o
---- /dev/null
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -0,0 +1,943 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/device.h>
-+#include <linux/of.h>
-+#include <linux/of_platform.h>
-+#include <linux/pcs/pcs-qcom-ipq-uniphy.h>
-+#include <linux/phylink.h>
-+#include <linux/platform_device.h>
-+#include <linux/reset.h>
-+
-+/* Maximum PCS channel numbers, For PSGMII it has 5 channels */
-+#define PCS_MAX_CHANNELS 5
-+
-+#define PCS_CALIBRATION 0x1e0
-+#define PCS_CALIBRATION_DONE BIT(7)
-+
-+#define PCS_MODE_CTRL 0x46c
-+#define PCS_MODE_SEL_MASK GENMASK(12, 8)
-+#define PCS_MODE_SGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x4)
-+#define PCS_MODE_QSGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x1)
-+#define PCS_MODE_PSGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
-+#define PCS_MODE_XPCS FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
-+#define PCS_MODE_AN_MODE BIT(0)
-+
-+#define PCS_CHANNEL_CTRL(x) (0x480 + 0x18 * (x))
-+#define PCS_CHANNEL_ADPT_RESET BIT(11)
-+#define PCS_CHANNEL_FORCE_MODE BIT(3)
-+#define PCS_CHANNEL_SPEED_MASK GENMASK(2, 1)
-+#define PCS_CHANNEL_SPEED_1000 FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x2)
-+#define PCS_CHANNEL_SPEED_100 FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x1)
-+#define PCS_CHANNEL_SPEED_10 FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x0)
-+
-+#define PCS_CHANNEL_STS(x) (0x488 + 0x18 * (x))
-+#define PCS_CHANNEL_LINK_STS BIT(7)
-+#define PCS_CHANNEL_STS_DUPLEX_FULL BIT(6)
-+#define PCS_CHANNEL_STS_SPEED_MASK GENMASK(5, 4)
-+#define PCS_CHANNEL_STS_SPEED_10 0
-+#define PCS_CHANNEL_STS_SPEED_100 1
-+#define PCS_CHANNEL_STS_SPEED_1000 2
-+#define PCS_CHANNEL_STS_PAUSE_TX_EN BIT(1)
-+#define PCS_CHANNEL_STS_PAUSE_RX_EN BIT(0)
-+
-+#define PCS_PLL_RESET 0x780
-+#define PCS_ANA_SW_RESET BIT(6)
-+
-+#define XPCS_INDIRECT_ADDR 0x8000
-+#define XPCS_INDIRECT_AHB_ADDR 0x83fc
-+#define XPCS_INDIRECT_ADDR_H GENMASK(20, 8)
-+#define XPCS_INDIRECT_ADDR_L GENMASK(7, 0)
-+#define XPCS_INDIRECT_DATA_ADDR(reg) (FIELD_PREP(GENMASK(15, 10), 0x20) | \
-+ FIELD_PREP(GENMASK(9, 2), \
-+ FIELD_GET(XPCS_INDIRECT_ADDR_L, reg)))
-+
-+#define XPCS_DIG_CTRL 0x38000
-+#define XPCS_USXG_ADPT_RESET BIT(10)
-+#define XPCS_USXG_EN BIT(9)
-+
-+#define XPCS_MII_CTRL 0x1f0000
-+#define XPCS_MII_AN_EN BIT(12)
-+#define XPCS_DUPLEX_FULL BIT(8)
-+#define XPCS_SPEED_MASK (BIT(13) | BIT(6) | BIT(5))
-+#define XPCS_SPEED_10000 (BIT(13) | BIT(6))
-+#define XPCS_SPEED_5000 (BIT(13) | BIT(5))
-+#define XPCS_SPEED_2500 BIT(5)
-+#define XPCS_SPEED_1000 BIT(6)
-+#define XPCS_SPEED_100 BIT(13)
-+#define XPCS_SPEED_10 0
-+
-+#define XPCS_MII_AN_CTRL 0x1f8001
-+#define XPCS_MII_AN_8BIT BIT(8)
-+
-+#define XPCS_MII_AN_INTR_STS 0x1f8002
-+#define XPCS_USXG_AN_LINK_STS BIT(14)
-+#define XPCS_USXG_AN_DUPLEX_FULL BIT(13)
-+#define XPCS_USXG_AN_SPEED_MASK GENMASK(12, 10)
-+#define XPCS_USXG_AN_SPEED_10 0
-+#define XPCS_USXG_AN_SPEED_100 1
-+#define XPCS_USXG_AN_SPEED_1000 2
-+#define XPCS_USXG_AN_SPEED_2500 4
-+#define XPCS_USXG_AN_SPEED_5000 5
-+#define XPCS_USXG_AN_SPEED_10000 3
-+
-+/* UNIPHY PCS RAW clock ID */
-+enum {
-+ PCS_RAW_RX_CLK = 0,
-+ PCS_RAW_TX_CLK,
-+ PCS_RAW_CLK_MAX
-+};
-+
-+/* UNIPHY PCS raw clock */
-+struct ipq_unipcs_raw_clk {
-+ struct clk_hw hw;
-+ unsigned long rate;
-+};
-+
-+/* UNIPHY PCS clock ID */
-+enum {
-+ PCS_SYS_CLK,
-+ PCS_AHB_CLK,
-+ PCS_CLK_MAX
-+};
-+
-+/* UNIPHY PCS reset ID */
-+enum {
-+ PCS_SYS_RESET,
-+ PCS_AHB_RESET,
-+ XPCS_RESET,
-+ PCS_RESET_MAX
-+};
-+
-+/* UNIPHY PCS clock name */
-+static const char *const pcs_clock_name[PCS_CLK_MAX] = {
-+ "sys",
-+ "ahb",
-+};
-+
-+/* UNIPHY PCS reset name */
-+static const char *const pcs_reset_name[PCS_RESET_MAX] = {
-+ "sys",
-+ "ahb",
-+ "xpcs",
-+};
-+
-+/* UNIPHY PCS channel clock ID */
-+enum {
-+ PCS_CH_RX_CLK,
-+ PCS_CH_TX_CLK,
-+ PCS_CH_CLK_MAX
-+};
-+
-+/* UNIPHY PCS channel clock name */
-+static const char *const pcs_ch_clock_name[PCS_CH_CLK_MAX] = {
-+ "ch_rx",
-+ "ch_tx",
-+};
-+
-+/* UNIPHY PCS private data instance */
-+struct ipq_uniphy_pcs {
-+ void __iomem *base;
-+ struct device *dev;
-+ phy_interface_t interface;
-+ struct mutex shared_lock; /* Lock to protect shared config */
-+ struct clk *clk[PCS_CLK_MAX];
-+ struct reset_control *reset[PCS_RESET_MAX];
-+ struct ipq_unipcs_raw_clk raw_clk[PCS_RAW_CLK_MAX];
-+};
-+
-+/* UNIPHY PCS channel private data instance */
-+struct ipq_uniphy_pcs_ch {
-+ struct ipq_uniphy_pcs *qunipcs;
-+ struct phylink_pcs pcs;
-+ int channel;
-+ struct clk *clk[PCS_CH_CLK_MAX];
-+};
-+
-+#define to_unipcs_raw_clk(_hw) \
-+ container_of(_hw, struct ipq_unipcs_raw_clk, hw)
-+#define phylink_pcs_to_unipcs(_pcs) \
-+ container_of(_pcs, struct ipq_uniphy_pcs_ch, pcs)
-+
-+static unsigned long ipq_unipcs_raw_clk_recalc_rate(struct clk_hw *hw,
-+ unsigned long parent_rate)
-+{
-+ struct ipq_unipcs_raw_clk *raw_clk = to_unipcs_raw_clk(hw);
-+
-+ return raw_clk->rate;
-+}
-+
-+static int ipq_unipcs_raw_clk_determine_rate(struct clk_hw *hw,
-+ struct clk_rate_request *req)
-+{
-+ switch (req->rate) {
-+ case 125000000:
-+ case 312500000:
-+ return 0;
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+static int ipq_unipcs_raw_clk_set_rate(struct clk_hw *hw,
-+ unsigned long rate,
-+ unsigned long parent_rate)
-+{
-+ struct ipq_unipcs_raw_clk *raw_clk = to_unipcs_raw_clk(hw);
-+
-+ switch (rate) {
-+ case 125000000:
-+ case 312500000:
-+ raw_clk->rate = rate;
-+ return 0;
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+static const struct clk_ops ipq_unipcs_raw_clk_ops = {
-+ .recalc_rate = ipq_unipcs_raw_clk_recalc_rate,
-+ .determine_rate = ipq_unipcs_raw_clk_determine_rate,
-+ .set_rate = ipq_unipcs_raw_clk_set_rate,
-+};
-+
-+static u32 ipq_unipcs_reg_read32(struct ipq_uniphy_pcs *qunipcs, u32 reg)
-+{
-+ /* PCS use direct AHB access while XPCS use indirect AHB access */
-+ if (reg >= XPCS_INDIRECT_ADDR) {
-+ writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
-+ qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
-+ return readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+ } else {
-+ return readl(qunipcs->base + reg);
-+ }
-+}
-+
-+static void ipq_unipcs_reg_write32(struct ipq_uniphy_pcs *qunipcs,
-+ u32 reg, u32 val)
-+{
-+ if (reg >= XPCS_INDIRECT_ADDR) {
-+ writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
-+ qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
-+ writel(val, qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+ } else {
-+ writel(val, qunipcs->base + reg);
-+ }
-+}
-+
-+static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
-+ u32 reg, u32 mask, u32 set)
-+{
-+ u32 val;
-+
-+ val = ipq_unipcs_reg_read32(qunipcs, reg);
-+ val &= ~mask;
-+ val |= set;
-+ ipq_unipcs_reg_write32(qunipcs, reg, val);
-+}
-+
-+static void ipq_unipcs_get_state_sgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
-+ struct phylink_link_state *state)
-+{
-+ u32 val;
-+
-+ val = ipq_unipcs_reg_read32(qunipcs, PCS_CHANNEL_STS(channel));
-+
-+ state->link = !!(val & PCS_CHANNEL_LINK_STS);
-+
-+ if (!state->link)
-+ return;
-+
-+ switch (FIELD_GET(PCS_CHANNEL_STS_SPEED_MASK, val)) {
-+ case PCS_CHANNEL_STS_SPEED_1000:
-+ state->speed = SPEED_1000;
-+ break;
-+ case PCS_CHANNEL_STS_SPEED_100:
-+ state->speed = SPEED_100;
-+ break;
-+ case PCS_CHANNEL_STS_SPEED_10:
-+ state->speed = SPEED_10;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ if (val & PCS_CHANNEL_STS_DUPLEX_FULL)
-+ state->duplex = DUPLEX_FULL;
-+ else
-+ state->duplex = DUPLEX_HALF;
-+
-+ if (val & PCS_CHANNEL_STS_PAUSE_TX_EN)
-+ state->pause |= MLO_PAUSE_TX;
-+ if (val & PCS_CHANNEL_STS_PAUSE_RX_EN)
-+ state->pause |= MLO_PAUSE_RX;
-+}
-+
-+static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ struct phylink_link_state *state)
-+{
-+ u32 val;
-+
-+ val = ipq_unipcs_reg_read32(qunipcs, XPCS_MII_AN_INTR_STS);
-+
-+ state->link = !!(val & XPCS_USXG_AN_LINK_STS);
-+
-+ if (!state->link)
-+ return;
-+
-+ switch (FIELD_GET(XPCS_USXG_AN_SPEED_MASK, val)) {
-+ case XPCS_USXG_AN_SPEED_10000:
-+ state->speed = SPEED_10000;
-+ break;
-+ case XPCS_USXG_AN_SPEED_5000:
-+ state->speed = SPEED_5000;
-+ break;
-+ case XPCS_USXG_AN_SPEED_2500:
-+ state->speed = SPEED_2500;
-+ break;
-+ case XPCS_USXG_AN_SPEED_1000:
-+ state->speed = SPEED_1000;
-+ break;
-+ case XPCS_USXG_AN_SPEED_100:
-+ state->speed = SPEED_100;
-+ break;
-+ case XPCS_USXG_AN_SPEED_10:
-+ state->speed = SPEED_10;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ if (val & XPCS_USXG_AN_DUPLEX_FULL)
-+ state->duplex = DUPLEX_FULL;
-+ else
-+ state->duplex = DUPLEX_HALF;
-+}
-+
-+static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
-+ phy_interface_t interface)
-+{
-+ unsigned long rate = 0;
-+ u32 val;
-+ int ret;
-+
-+ /* Assert XPCS reset */
-+ reset_control_assert(qunipcs->reset[XPCS_RESET]);
-+
-+ /* Config PCS interface mode */
-+ switch (interface) {
-+ case PHY_INTERFACE_MODE_SGMII:
-+ rate = 125000000;
-+ /* Select Qualcomm SGMII AN mode */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
-+ PCS_MODE_SGMII);
-+ break;
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ rate = 125000000;
-+ /* Select Qualcomm SGMII AN mode */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
-+ PCS_MODE_QSGMII);
-+ break;
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ rate = 125000000;
-+ /* Select Qualcomm SGMII AN mode */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
-+ PCS_MODE_PSGMII);
-+ break;
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ rate = 312500000;
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK,
-+ PCS_MODE_XPCS);
-+ break;
-+ default:
-+ dev_err(qunipcs->dev,
-+ "interface %s not supported\n", phy_modes(interface));
-+ return -EOPNOTSUPP;
-+ }
-+
-+ /* PCS PLL reset */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET, PCS_ANA_SW_RESET, 0);
-+ fsleep(10000);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET,
-+ PCS_ANA_SW_RESET, PCS_ANA_SW_RESET);
-+
-+ /* Wait for calibration completion */
-+ ret = read_poll_timeout(ipq_unipcs_reg_read32, val,
-+ val & PCS_CALIBRATION_DONE,
-+ 1000, 100000, true,
-+ qunipcs, PCS_CALIBRATION);
-+ if (ret) {
-+ dev_err(qunipcs->dev, "UNIPHY PCS calibration timed-out\n");
-+ return ret;
-+ }
-+
-+ /* Configure raw clock rate */
-+ clk_set_rate(qunipcs->raw_clk[PCS_RAW_RX_CLK].hw.clk, rate);
-+ clk_set_rate(qunipcs->raw_clk[PCS_RAW_TX_CLK].hw.clk, rate);
-+
-+ return 0;
-+}
-+
-+static int ipq_unipcs_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
-+ unsigned int neg_mode,
-+ phy_interface_t interface)
-+{
-+ int ret;
-+
-+ /* PCS configurations shared by multi channels should be
-+ * configured for only once.
-+ */
-+ if (phy_interface_num_ports(interface) > 1)
-+ mutex_lock(&qunipcs->shared_lock);
-+
-+ if (qunipcs->interface != interface) {
-+ ret = ipq_unipcs_config_mode(qunipcs, interface);
-+ if (ret)
-+ goto err;
-+
-+ qunipcs->interface = interface;
-+ }
-+
-+ if (phy_interface_num_ports(interface) > 1)
-+ mutex_unlock(&qunipcs->shared_lock);
-+
-+ /* In-band autoneg mode is enabled by default for each PCS channel */
-+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
-+ return 0;
-+
-+ /* Force speed mode */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_FORCE_MODE, PCS_CHANNEL_FORCE_MODE);
-+
-+ return 0;
-+
-+err:
-+ if (phy_interface_num_ports(interface) > 1)
-+ mutex_unlock(&qunipcs->shared_lock);
-+
-+ return ret;
-+}
-+
-+static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ unsigned int neg_mode,
-+ phy_interface_t interface)
-+{
-+ int ret;
-+
-+ if (qunipcs->interface != interface) {
-+ ret = ipq_unipcs_config_mode(qunipcs, interface);
-+ if (ret)
-+ return ret;
-+
-+ /* Deassert XPCS and configure XPCS USXGMII */
-+ reset_control_deassert(qunipcs->reset[XPCS_RESET]);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
-+ XPCS_USXG_EN, XPCS_USXG_EN);
-+
-+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_AN_CTRL,
-+ XPCS_MII_AN_8BIT,
-+ XPCS_MII_AN_8BIT);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
-+ XPCS_MII_AN_EN, XPCS_MII_AN_EN);
-+ }
-+
-+ qunipcs->interface = interface;
-+ }
-+
-+ return 0;
-+}
-+
-+static unsigned long ipq_unipcs_clock_rate_get_gmii(int speed)
-+{
-+ unsigned long rate = 0;
-+
-+ switch (speed) {
-+ case SPEED_1000:
-+ rate = 125000000;
-+ break;
-+ case SPEED_100:
-+ rate = 25000000;
-+ break;
-+ case SPEED_10:
-+ rate = 2500000;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return rate;
-+}
-+
-+static unsigned long ipq_unipcs_clock_rate_get_xgmii(int speed)
-+{
-+ unsigned long rate = 0;
-+
-+ switch (speed) {
-+ case SPEED_10000:
-+ rate = 312500000;
-+ break;
-+ case SPEED_5000:
-+ rate = 156250000;
-+ break;
-+ case SPEED_2500:
-+ rate = 78125000;
-+ break;
-+ case SPEED_1000:
-+ rate = 125000000;
-+ break;
-+ case SPEED_100:
-+ rate = 12500000;
-+ break;
-+ case SPEED_10:
-+ rate = 1250000;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return rate;
-+}
-+
-+static void
-+ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
-+ phy_interface_t interface,
-+ int speed)
-+{
-+ struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
-+ unsigned long rate = 0;
-+
-+ switch (interface) {
-+ case PHY_INTERFACE_MODE_SGMII:
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ rate = ipq_unipcs_clock_rate_get_gmii(speed);
-+ break;
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ rate = ipq_unipcs_clock_rate_get_xgmii(speed);
-+ break;
-+ default:
-+ dev_err(qunipcs->dev,
-+ "interface %s not supported\n", phy_modes(interface));
-+ return;
-+ }
-+
-+ if (rate == 0) {
-+ dev_err(qunipcs->dev, "Invalid PCS clock rate\n");
-+ return;
-+ }
-+
-+ clk_set_rate(qunipcs_ch->clk[PCS_CH_RX_CLK], rate);
-+ clk_set_rate(qunipcs_ch->clk[PCS_CH_TX_CLK], rate);
-+ fsleep(10000);
-+}
-+
-+static void ipq_unipcs_link_up_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
-+ unsigned int neg_mode,
-+ int speed)
-+{
-+ /* No need to config PCS speed if in-band autoneg is enabled */
-+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
-+ goto pcs_adapter_reset;
-+
-+ /* PCS speed set for force mode */
-+ switch (speed) {
-+ case SPEED_1000:
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_SPEED_MASK,
-+ PCS_CHANNEL_SPEED_1000);
-+ break;
-+ case SPEED_100:
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_SPEED_MASK,
-+ PCS_CHANNEL_SPEED_100);
-+ break;
-+ case SPEED_10:
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_SPEED_MASK,
-+ PCS_CHANNEL_SPEED_10);
-+ break;
-+ default:
-+ dev_err(qunipcs->dev, "Force speed %d not supported\n", speed);
-+ return;
-+ }
-+
-+pcs_adapter_reset:
-+ /* PCS channel adapter reset */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_ADPT_RESET,
-+ 0);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_ADPT_RESET,
-+ PCS_CHANNEL_ADPT_RESET);
-+}
-+
-+static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int speed)
-+{
-+ u32 val;
-+
-+ switch (speed) {
-+ case SPEED_10000:
-+ val = XPCS_SPEED_10000;
-+ break;
-+ case SPEED_5000:
-+ val = XPCS_SPEED_5000;
-+ break;
-+ case SPEED_2500:
-+ val = XPCS_SPEED_2500;
-+ break;
-+ case SPEED_1000:
-+ val = XPCS_SPEED_1000;
-+ break;
-+ case SPEED_100:
-+ val = XPCS_SPEED_100;
-+ break;
-+ case SPEED_10:
-+ val = XPCS_SPEED_10;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ /* USXGMII only support full duplex mode */
-+ val |= XPCS_DUPLEX_FULL;
-+
-+ /* Config XPCS speed */
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
-+ XPCS_SPEED_MASK | XPCS_DUPLEX_FULL,
-+ val);
-+
-+ /* XPCS adapter reset */
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
-+ XPCS_USXG_ADPT_RESET,
-+ XPCS_USXG_ADPT_RESET);
-+}
-+
-+static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
-+ struct phylink_link_state *state)
-+{
-+ struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
-+ struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
-+ int channel = qunipcs_ch->channel;
-+
-+ switch (state->interface) {
-+ case PHY_INTERFACE_MODE_SGMII:
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
-+ break;
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ ipq_unipcs_get_state_usxgmii(qunipcs, state);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ dev_dbg(qunipcs->dev,
-+ "mode=%s/%s/%s link=%u\n",
-+ phy_modes(state->interface),
-+ phy_speed_to_str(state->speed),
-+ phy_duplex_to_str(state->duplex),
-+ state->link);
-+}
-+
-+static int ipq_unipcs_config(struct phylink_pcs *pcs,
-+ unsigned int neg_mode,
-+ phy_interface_t interface,
-+ const unsigned long *advertising,
-+ bool permit)
-+{
-+ struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
-+ struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
-+ int channel = qunipcs_ch->channel;
-+
-+ switch (interface) {
-+ case PHY_INTERFACE_MODE_SGMII:
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ return ipq_unipcs_config_sgmii(qunipcs, channel,
-+ neg_mode, interface);
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ return ipq_unipcs_config_usxgmii(qunipcs,
-+ neg_mode, interface);
-+ default:
-+ dev_err(qunipcs->dev,
-+ "interface %s not supported\n", phy_modes(interface));
-+ return -EOPNOTSUPP;
-+ };
-+}
-+
-+static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
-+ unsigned int neg_mode,
-+ phy_interface_t interface,
-+ int speed, int duplex)
-+{
-+ struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
-+ struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
-+ int channel = qunipcs_ch->channel;
-+
-+ /* Configure PCS channel interface clock rate */
-+ ipq_unipcs_link_up_clock_rate_set(qunipcs_ch, interface, speed);
-+
-+ /* Configure PCS speed and reset PCS adapter */
-+ switch (interface) {
-+ case PHY_INTERFACE_MODE_SGMII:
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
-+ neg_mode, speed);
-+ break;
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
-+ break;
-+ default:
-+ dev_err(qunipcs->dev,
-+ "interface %s not supported\n", phy_modes(interface));
-+ break;
-+ }
-+}
-+
-+static const struct phylink_pcs_ops ipq_unipcs_phylink_ops = {
-+ .pcs_get_state = ipq_unipcs_get_state,
-+ .pcs_config = ipq_unipcs_config,
-+ .pcs_link_up = ipq_unipcs_link_up,
-+};
-+
-+/**
-+ * ipq_unipcs_create() - Create Qualcomm IPQ UNIPHY PCS
-+ * @np: Device tree node to the PCS
-+ *
-+ * Description: Create a phylink PCS instance for a PCS node @np.
-+ *
-+ * Return: A pointer to the phylink PCS instance or an error-pointer value.
-+ */
-+struct phylink_pcs *ipq_unipcs_create(struct device_node *np)
-+{
-+ struct ipq_uniphy_pcs_ch *qunipcs_ch;
-+ struct ipq_uniphy_pcs *qunipcs;
-+ struct device_node *uniphy_np;
-+ struct platform_device *pdev;
-+ u32 channel;
-+ int i, j;
-+
-+ if (!of_device_is_available(np))
-+ return ERR_PTR(-ENODEV);
-+
-+ if (of_property_read_u32(np, "reg", &channel))
-+ return ERR_PTR(-EINVAL);
-+
-+ if (channel >= PCS_MAX_CHANNELS)
-+ return ERR_PTR(-EINVAL);
-+
-+ uniphy_np = of_get_parent(np);
-+ if (!uniphy_np)
-+ return ERR_PTR(-ENODEV);
-+
-+ if (!of_device_is_available(uniphy_np)) {
-+ of_node_put(uniphy_np);
-+ return ERR_PTR(-ENODEV);
-+ }
-+
-+ pdev = of_find_device_by_node(uniphy_np);
-+ of_node_put(uniphy_np);
-+ if (!pdev)
-+ return ERR_PTR(-ENODEV);
-+
-+ qunipcs = platform_get_drvdata(pdev);
-+ platform_device_put(pdev);
-+
-+ /* If probe is not yet completed, return DEFER to
-+ * the dependent driver.
-+ */
-+ if (!qunipcs)
-+ return ERR_PTR(-EPROBE_DEFER);
-+
-+ qunipcs_ch = kzalloc(sizeof(*qunipcs_ch), GFP_KERNEL);
-+ if (!qunipcs_ch)
-+ return ERR_PTR(-ENOMEM);
-+
-+ qunipcs_ch->qunipcs = qunipcs;
-+ qunipcs_ch->channel = channel;
-+ qunipcs_ch->pcs.ops = &ipq_unipcs_phylink_ops;
-+ qunipcs_ch->pcs.neg_mode = true;
-+ qunipcs_ch->pcs.poll = true;
-+
-+ for (i = 0; i < PCS_CH_CLK_MAX; i++) {
-+ qunipcs_ch->clk[i] = of_clk_get_by_name(np,
-+ pcs_ch_clock_name[i]);
-+ if (IS_ERR(qunipcs_ch->clk[i])) {
-+ dev_err(qunipcs->dev,
-+ "Failed to get PCS channel %d clock ID %s\n",
-+ channel, pcs_ch_clock_name[i]);
-+ goto free_pcs;
-+ }
-+
-+ clk_prepare_enable(qunipcs_ch->clk[i]);
-+ }
-+
-+ return &qunipcs_ch->pcs;
-+
-+free_pcs:
-+ for (j = 0; j < i; j++) {
-+ clk_disable_unprepare(qunipcs_ch->clk[j]);
-+ clk_put(qunipcs_ch->clk[j]);
-+ }
-+
-+ kfree(qunipcs_ch);
-+ return ERR_PTR(-ENODEV);
-+}
-+EXPORT_SYMBOL(ipq_unipcs_create);
-+
-+/**
-+ * ipq_unipcs_destroy() - Destroy Qualcomm IPQ UNIPHY PCS
-+ * @pcs: PCS instance
-+ *
-+ * Description: Destroy a phylink PCS instance.
-+ */
-+void ipq_unipcs_destroy(struct phylink_pcs *pcs)
-+{
-+ struct ipq_uniphy_pcs_ch *qunipcs_ch;
-+ int i;
-+
-+ if (!pcs)
-+ return;
-+
-+ qunipcs_ch = phylink_pcs_to_unipcs(pcs);
-+
-+ for (i = 0; i < PCS_CH_CLK_MAX; i++) {
-+ clk_disable_unprepare(qunipcs_ch->clk[i]);
-+ clk_put(qunipcs_ch->clk[i]);
-+ }
-+
-+ kfree(qunipcs_ch);
-+}
-+EXPORT_SYMBOL(ipq_unipcs_destroy);
-+
-+static int ipq_uniphy_clk_register(struct ipq_uniphy_pcs *qunipcs)
-+{
-+ struct ipq_unipcs_raw_clk *raw_clk;
-+ struct device *dev = qunipcs->dev;
-+ struct clk_hw_onecell_data *data;
-+
-+ struct clk_init_data init = { };
-+ int i, ret;
-+
-+ data = devm_kzalloc(dev,
-+ struct_size(data, hws, PCS_RAW_CLK_MAX),
-+ GFP_KERNEL);
-+ if (!data)
-+ return -ENOMEM;
-+
-+ data->num = PCS_RAW_CLK_MAX;
-+ for (i = 0; i < PCS_RAW_CLK_MAX; i++) {
-+ ret = of_property_read_string_index(dev->of_node,
-+ "clock-output-names",
-+ i, &init.name);
-+ if (ret) {
-+ dev_err(dev,
-+ "%pOFn: No clock-output-names\n", dev->of_node);
-+ return ret;
-+ }
-+
-+ init.ops = &ipq_unipcs_raw_clk_ops;
-+ raw_clk = &qunipcs->raw_clk[i];
-+
-+ raw_clk->rate = 125000000;
-+ raw_clk->hw.init = &init;
-+
-+ ret = devm_clk_hw_register(dev, &raw_clk->hw);
-+ if (ret) {
-+ dev_err(dev, "Failed to register UNIPHY PCS raw clock\n");
-+ return ret;
-+ }
-+
-+ data->hws[i] = &raw_clk->hw;
-+ }
-+
-+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
-+}
-+
-+static int ipq_uniphy_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct ipq_uniphy_pcs *priv;
-+ int i, ret;
-+
-+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-+ if (!priv)
-+ return -ENOMEM;
-+
-+ priv->dev = dev;
-+
-+ priv->base = devm_platform_ioremap_resource(pdev, 0);
-+ if (IS_ERR(priv->base))
-+ return PTR_ERR(priv->base);
-+
-+ for (i = 0; i < PCS_CLK_MAX; i++) {
-+ priv->clk[i] = devm_clk_get_optional_enabled(dev,
-+ pcs_clock_name[i]);
-+
-+ if (IS_ERR(priv->clk[i]))
-+ dev_err(dev, "Failed to get the clock ID %s\n",
-+ pcs_clock_name[i]);
-+ }
-+
-+ for (i = 0; i < PCS_RESET_MAX; i++) {
-+ priv->reset[i] =
-+ devm_reset_control_get_optional_exclusive(dev,
-+ pcs_reset_name[i]);
-+
-+ if (IS_ERR(priv->reset[i]))
-+ dev_err(dev, "Failed to get the reset ID %s\n",
-+ pcs_reset_name[i]);
-+ }
-+
-+ /* Set UNIPHY PCS system and AHB clock rate */
-+ clk_set_rate(priv->clk[PCS_SYS_CLK], 24000000);
-+ clk_set_rate(priv->clk[PCS_AHB_CLK], 100000000);
-+
-+ ret = ipq_uniphy_clk_register(priv);
-+ if (ret)
-+ return ret;
-+
-+ mutex_init(&priv->shared_lock);
-+
-+ platform_set_drvdata(pdev, priv);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id ipq_uniphy_of_mtable[] = {
-+ { .compatible = "qcom,ipq5332-uniphy" },
-+ { .compatible = "qcom,ipq9574-uniphy" },
-+ { /* sentinel */ },
-+};
-+MODULE_DEVICE_TABLE(of, ipq_uniphy_of_mtable);
-+
-+static struct platform_driver ipq_uniphy_driver = {
-+ .driver = {
-+ .name = "ipq_uniphy",
-+ .of_match_table = ipq_uniphy_of_mtable,
-+ },
-+ .probe = ipq_uniphy_probe,
-+};
-+module_platform_driver(ipq_uniphy_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Qualcomm IPQ UNIPHY PCS driver");
-+MODULE_AUTHOR("Lei Wei <quic_leiwei@quicinc.com>");
---- /dev/null
-+++ b/include/linux/pcs/pcs-qcom-ipq-uniphy.h
-@@ -0,0 +1,13 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ *
-+ */
-+
-+#ifndef __LINUX_PCS_QCOM_IPQ_UNIPHY_H
-+#define __LINUX_PCS_QCOM_IPQ_UNIPHY_H
-+
-+struct phylink_pcs *ipq_unipcs_create(struct device_node *np);
-+void ipq_unipcs_destroy(struct phylink_pcs *pcs);
-+
-+#endif /* __LINUX_PCS_QCOM_IPQ_UNIPHY_H */
+++ /dev/null
-From f23eb497c891985126a065f950bc61e9c404bb12 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Wed, 6 Mar 2024 17:40:52 +0800
-Subject: [PATCH 12/50] net: pcs: Add 10GBASER interface mode support to IPQ
- UNIPHY PCS driver
-
-10GBASER mode is used when PCS connects with a 10G SFP module.
-
-Change-Id: Ifc3c3bb23811807a9b34e88771aab2c830c2327c
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 48 +++++++++++++++++++++++++++
- 1 file changed, 48 insertions(+)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -57,6 +57,9 @@
- FIELD_PREP(GENMASK(9, 2), \
- FIELD_GET(XPCS_INDIRECT_ADDR_L, reg)))
-
-+#define XPCS_10GBASER_STS 0x30020
-+#define XPCS_10GBASER_LINK_STS BIT(12)
-+
- #define XPCS_DIG_CTRL 0x38000
- #define XPCS_USXG_ADPT_RESET BIT(10)
- #define XPCS_USXG_EN BIT(9)
-@@ -320,6 +323,23 @@ static void ipq_unipcs_get_state_usxgmii
- state->duplex = DUPLEX_HALF;
- }
-
-+static void ipq_unipcs_get_state_10gbaser(struct ipq_uniphy_pcs *qunipcs,
-+ struct phylink_link_state *state)
-+{
-+ u32 val;
-+
-+ val = ipq_unipcs_reg_read32(qunipcs, XPCS_10GBASER_STS);
-+
-+ state->link = !!(val & XPCS_10GBASER_LINK_STS);
-+
-+ if (!state->link)
-+ return;
-+
-+ state->speed = SPEED_10000;
-+ state->duplex = DUPLEX_FULL;
-+ state->pause |= MLO_PAUSE_TXRX_MASK;
-+}
-+
- static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
- phy_interface_t interface)
- {
-@@ -354,6 +374,7 @@ static int ipq_unipcs_config_mode(struct
- PCS_MODE_PSGMII);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10GBASER:
- rate = 312500000;
- ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
- PCS_MODE_SEL_MASK,
-@@ -461,6 +482,25 @@ static int ipq_unipcs_config_usxgmii(str
- return 0;
- }
-
-+static int ipq_unipcs_config_10gbaser(struct ipq_uniphy_pcs *qunipcs,
-+ phy_interface_t interface)
-+{
-+ int ret;
-+
-+ if (qunipcs->interface != interface) {
-+ ret = ipq_unipcs_config_mode(qunipcs, interface);
-+ if (ret)
-+ return ret;
-+
-+ /* Deassert XPCS */
-+ reset_control_deassert(qunipcs->reset[XPCS_RESET]);
-+
-+ qunipcs->interface = interface;
-+ }
-+
-+ return 0;
-+}
-+
- static unsigned long ipq_unipcs_clock_rate_get_gmii(int speed)
- {
- unsigned long rate = 0;
-@@ -527,6 +567,7 @@ ipq_unipcs_link_up_clock_rate_set(struct
- rate = ipq_unipcs_clock_rate_get_gmii(speed);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10GBASER:
- rate = ipq_unipcs_clock_rate_get_xgmii(speed);
- break;
- default:
-@@ -644,6 +685,9 @@ static void ipq_unipcs_get_state(struct
- case PHY_INTERFACE_MODE_USXGMII:
- ipq_unipcs_get_state_usxgmii(qunipcs, state);
- break;
-+ case PHY_INTERFACE_MODE_10GBASER:
-+ ipq_unipcs_get_state_10gbaser(qunipcs, state);
-+ break;
- default:
- break;
- }
-@@ -675,6 +719,8 @@ static int ipq_unipcs_config(struct phyl
- case PHY_INTERFACE_MODE_USXGMII:
- return ipq_unipcs_config_usxgmii(qunipcs,
- neg_mode, interface);
-+ case PHY_INTERFACE_MODE_10GBASER:
-+ return ipq_unipcs_config_10gbaser(qunipcs, interface);
- default:
- dev_err(qunipcs->dev,
- "interface %s not supported\n", phy_modes(interface));
-@@ -705,6 +751,8 @@ static void ipq_unipcs_link_up(struct ph
- case PHY_INTERFACE_MODE_USXGMII:
- ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
- break;
-+ case PHY_INTERFACE_MODE_10GBASER:
-+ break;
- default:
- dev_err(qunipcs->dev,
- "interface %s not supported\n", phy_modes(interface));
+++ /dev/null
-From fcd1c53b460aa39cfd15f842126af62b27a4fad5 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Tue, 2 Apr 2024 18:28:42 +0800
-Subject: [PATCH 13/50] net: pcs: Add 2500BASEX interface mode support to IPQ
- UNIPHY PCS driver
-
-2500BASEX mode is used when PCS connects with QCA8386 switch in a fixed
-2500M link. It is also used when PCS connectes with QCA8081 PHY which
-works at 2500M link speed. In addition, it can be also used when PCS
-connects with a 2.5G SFP module.
-
-Change-Id: I3fe61113c1b3685debc20659736a9488216a029d
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 95 +++++++++++++++++++++++++++
- 1 file changed, 95 insertions(+)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -25,6 +25,7 @@
- #define PCS_MODE_SGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x4)
- #define PCS_MODE_QSGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x1)
- #define PCS_MODE_PSGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
-+#define PCS_MODE_SGMII_PLUS FIELD_PREP(PCS_MODE_SEL_MASK, 0x8)
- #define PCS_MODE_XPCS FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
- #define PCS_MODE_AN_MODE BIT(0)
-
-@@ -282,6 +283,24 @@ static void ipq_unipcs_get_state_sgmii(s
- state->pause |= MLO_PAUSE_RX;
- }
-
-+static void ipq_unipcs_get_state_2500basex(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
-+ struct phylink_link_state *state)
-+{
-+ u32 val;
-+
-+ val = ipq_unipcs_reg_read32(qunipcs, PCS_CHANNEL_STS(channel));
-+
-+ state->link = !!(val & PCS_CHANNEL_LINK_STS);
-+
-+ if (!state->link)
-+ return;
-+
-+ state->speed = SPEED_2500;
-+ state->duplex = DUPLEX_FULL;
-+ state->pause |= MLO_PAUSE_TXRX_MASK;
-+}
-+
- static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
- struct phylink_link_state *state)
- {
-@@ -373,6 +392,12 @@ static int ipq_unipcs_config_mode(struct
- PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
- PCS_MODE_PSGMII);
- break;
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ rate = 312500000;
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK,
-+ PCS_MODE_SGMII_PLUS);
-+ break;
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10GBASER:
- rate = 312500000;
-@@ -450,6 +475,22 @@ err:
- return ret;
- }
-
-+static int ipq_unipcs_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
-+ phy_interface_t interface)
-+{
-+ int ret;
-+
-+ if (qunipcs->interface != interface) {
-+ ret = ipq_unipcs_config_mode(qunipcs, interface);
-+ if (ret)
-+ return ret;
-+
-+ qunipcs->interface = interface;
-+ }
-+
-+ return 0;
-+}
-+
- static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
- unsigned int neg_mode,
- phy_interface_t interface)
-@@ -522,6 +563,21 @@ static unsigned long ipq_unipcs_clock_ra
- return rate;
- }
-
-+static unsigned long ipq_unipcs_clock_rate_get_gmiiplus(int speed)
-+{
-+ unsigned long rate = 0;
-+
-+ switch (speed) {
-+ case SPEED_2500:
-+ rate = 312500000;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return rate;
-+}
-+
- static unsigned long ipq_unipcs_clock_rate_get_xgmii(int speed)
- {
- unsigned long rate = 0;
-@@ -566,6 +622,9 @@ ipq_unipcs_link_up_clock_rate_set(struct
- case PHY_INTERFACE_MODE_PSGMII:
- rate = ipq_unipcs_clock_rate_get_gmii(speed);
- break;
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ rate = ipq_unipcs_clock_rate_get_gmiiplus(speed);
-+ break;
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10GBASER:
- rate = ipq_unipcs_clock_rate_get_xgmii(speed);
-@@ -627,6 +686,21 @@ pcs_adapter_reset:
- PCS_CHANNEL_ADPT_RESET);
- }
-
-+static void ipq_unipcs_link_up_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
-+ int speed)
-+{
-+ /* 2500BASEX do not support autoneg and do not need to
-+ * configure PCS speed, only reset PCS adapter here.
-+ */
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_ADPT_RESET,
-+ 0);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
-+ PCS_CHANNEL_ADPT_RESET,
-+ PCS_CHANNEL_ADPT_RESET);
-+}
-+
- static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
- int speed)
- {
-@@ -669,6 +743,17 @@ static void ipq_unipcs_link_up_config_us
- XPCS_USXG_ADPT_RESET);
- }
-
-+static int ipq_unipcs_validate(struct phylink_pcs *pcs,
-+ unsigned long *supported,
-+ const struct phylink_link_state *state)
-+{
-+ /* In-band autoneg is not supported for 2500BASEX */
-+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
-+ phylink_clear(supported, Autoneg);
-+
-+ return 0;
-+}
-+
- static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
- {
-@@ -682,6 +767,9 @@ static void ipq_unipcs_get_state(struct
- case PHY_INTERFACE_MODE_PSGMII:
- ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
- break;
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ ipq_unipcs_get_state_2500basex(qunipcs, channel, state);
-+ break;
- case PHY_INTERFACE_MODE_USXGMII:
- ipq_unipcs_get_state_usxgmii(qunipcs, state);
- break;
-@@ -716,6 +804,8 @@ static int ipq_unipcs_config(struct phyl
- case PHY_INTERFACE_MODE_PSGMII:
- return ipq_unipcs_config_sgmii(qunipcs, channel,
- neg_mode, interface);
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ return ipq_unipcs_config_2500basex(qunipcs, interface);
- case PHY_INTERFACE_MODE_USXGMII:
- return ipq_unipcs_config_usxgmii(qunipcs,
- neg_mode, interface);
-@@ -748,6 +838,10 @@ static void ipq_unipcs_link_up(struct ph
- ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
- neg_mode, speed);
- break;
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ ipq_unipcs_link_up_config_2500basex(qunipcs,
-+ channel, speed);
-+ break;
- case PHY_INTERFACE_MODE_USXGMII:
- ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
- break;
-@@ -761,6 +855,7 @@ static void ipq_unipcs_link_up(struct ph
- }
-
- static const struct phylink_pcs_ops ipq_unipcs_phylink_ops = {
-+ .pcs_validate = ipq_unipcs_validate,
- .pcs_get_state = ipq_unipcs_get_state,
- .pcs_config = ipq_unipcs_config,
- .pcs_link_up = ipq_unipcs_link_up,
+++ /dev/null
-From 23f3550c387246025ed2971989b747a5936bf080 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Tue, 9 Apr 2024 01:07:22 +0800
-Subject: [PATCH 14/50] net:pcs: Add 1000BASEX interface mode support to IPQ
- UNIPHY PCS driver
-
-1000BASEX is used when PCS connects with a 1G SFP module.
-
-Change-Id: Ied7298de3c1ecba74e6457a07fdd6b3ceab79728
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 23 +++++++++++++++++++++++
- 1 file changed, 23 insertions(+)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -27,6 +27,9 @@
- #define PCS_MODE_PSGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
- #define PCS_MODE_SGMII_PLUS FIELD_PREP(PCS_MODE_SEL_MASK, 0x8)
- #define PCS_MODE_XPCS FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
-+#define PCS_MODE_SGMII_CTRL_MASK GENMASK(6, 4)
-+#define PCS_MODE_SGMII_CTRL_1000BASEX FIELD_PREP(PCS_MODE_SGMII_CTRL_MASK, \
-+ 0x0)
- #define PCS_MODE_AN_MODE BIT(0)
-
- #define PCS_CHANNEL_CTRL(x) (0x480 + 0x18 * (x))
-@@ -392,6 +395,13 @@ static int ipq_unipcs_config_mode(struct
- PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
- PCS_MODE_PSGMII);
- break;
-+ case PHY_INTERFACE_MODE_1000BASEX:
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK |
-+ PCS_MODE_SGMII_CTRL_MASK,
-+ PCS_MODE_SGMII |
-+ PCS_MODE_SGMII_CTRL_1000BASEX);
-+ break;
- case PHY_INTERFACE_MODE_2500BASEX:
- rate = 312500000;
- ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-@@ -620,6 +630,7 @@ ipq_unipcs_link_up_clock_rate_set(struct
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_PSGMII:
-+ case PHY_INTERFACE_MODE_1000BASEX:
- rate = ipq_unipcs_clock_rate_get_gmii(speed);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
-@@ -765,6 +776,10 @@ static void ipq_unipcs_get_state(struct
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_PSGMII:
-+ case PHY_INTERFACE_MODE_1000BASEX:
-+ /* SGMII and 1000BASEX in-band autoneg word format are decoded
-+ * by PCS hardware and both placed to the same status register.
-+ */
- ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
-@@ -802,6 +817,7 @@ static int ipq_unipcs_config(struct phyl
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_PSGMII:
-+ case PHY_INTERFACE_MODE_1000BASEX:
- return ipq_unipcs_config_sgmii(qunipcs, channel,
- neg_mode, interface);
- case PHY_INTERFACE_MODE_2500BASEX:
-@@ -818,6 +834,11 @@ static int ipq_unipcs_config(struct phyl
- };
- }
-
-+static void qcom_ipq_unipcs_an_restart(struct phylink_pcs *pcs)
-+{
-+ /* Currently not used */
-+}
-+
- static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
- unsigned int neg_mode,
- phy_interface_t interface,
-@@ -835,6 +856,7 @@ static void ipq_unipcs_link_up(struct ph
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_PSGMII:
-+ case PHY_INTERFACE_MODE_1000BASEX:
- ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
- neg_mode, speed);
- break;
-@@ -858,6 +880,7 @@ static const struct phylink_pcs_ops ipq_
- .pcs_validate = ipq_unipcs_validate,
- .pcs_get_state = ipq_unipcs_get_state,
- .pcs_config = ipq_unipcs_config,
-+ .pcs_an_restart = qcom_ipq_unipcs_an_restart,
- .pcs_link_up = ipq_unipcs_link_up,
- };
-
+++ /dev/null
-From d96ec0527b0f5618b3a0757b47606705555ee996 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Mon, 15 Apr 2024 11:06:02 +0800
-Subject: [PATCH 15/50] net:pcs: Add 10G_QXGMII interface mode support to IPQ
- UNIPHY PCS driver
-
-10G_QXGMII is used when PCS connectes with QCA8084 four ports
-2.5G PHYs.
-
-Change-Id: If3dc92a07ac3e51f7c9473fb05fa0668617916fb
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 174 +++++++++++++++++++++-----
- 1 file changed, 142 insertions(+), 32 deletions(-)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -50,6 +50,9 @@
- #define PCS_CHANNEL_STS_PAUSE_TX_EN BIT(1)
- #define PCS_CHANNEL_STS_PAUSE_RX_EN BIT(0)
-
-+#define PCS_QP_USXG_OPTION 0x584
-+#define PCS_QP_USXG_GMII_SRC_XPCS BIT(0)
-+
- #define PCS_PLL_RESET 0x780
- #define PCS_ANA_SW_RESET BIT(6)
-
-@@ -65,10 +68,22 @@
- #define XPCS_10GBASER_LINK_STS BIT(12)
-
- #define XPCS_DIG_CTRL 0x38000
-+#define XPCS_SOFT_RESET BIT(15)
- #define XPCS_USXG_ADPT_RESET BIT(10)
- #define XPCS_USXG_EN BIT(9)
-
-+#define XPCS_KR_CTRL 0x38007
-+#define XPCS_USXG_MODE_MASK GENMASK(12, 10)
-+#define XPCS_10G_QXGMII_MODE FIELD_PREP(XPCS_USXG_MODE_MASK, 0x5)
-+
-+#define XPCS_DIG_STS 0x3800a
-+#define XPCS_DIG_STS_AM_COUNT GENMASK(14, 0)
-+
-+#define XPCS_CHANNEL_DIG_CTRL(x) (0x1a8000 + 0x10000 * ((x) - 1))
-+#define XPCS_CHANNEL_USXG_ADPT_RESET BIT(5)
-+
- #define XPCS_MII_CTRL 0x1f0000
-+#define XPCS_CHANNEL_MII_CTRL(x) (0x1a0000 + 0x10000 * ((x) - 1))
- #define XPCS_MII_AN_EN BIT(12)
- #define XPCS_DUPLEX_FULL BIT(8)
- #define XPCS_SPEED_MASK (BIT(13) | BIT(6) | BIT(5))
-@@ -80,9 +95,11 @@
- #define XPCS_SPEED_10 0
-
- #define XPCS_MII_AN_CTRL 0x1f8001
-+#define XPCS_CHANNEL_MII_AN_CTRL(x) (0x1a8001 + 0x10000 * ((x) - 1))
- #define XPCS_MII_AN_8BIT BIT(8)
-
- #define XPCS_MII_AN_INTR_STS 0x1f8002
-+#define XPCS_CHANNEL_MII_AN_INTR_STS(x) (0x1a8002 + 0x10000 * ((x) - 1))
- #define XPCS_USXG_AN_LINK_STS BIT(14)
- #define XPCS_USXG_AN_DUPLEX_FULL BIT(13)
- #define XPCS_USXG_AN_SPEED_MASK GENMASK(12, 10)
-@@ -93,6 +110,10 @@
- #define XPCS_USXG_AN_SPEED_5000 5
- #define XPCS_USXG_AN_SPEED_10000 3
-
-+#define XPCS_XAUI_MODE_CTRL 0x1f8004
-+#define XPCS_CHANNEL_XAUI_MODE_CTRL(x) (0x1a8004 + 0x10000 * ((x) - 1))
-+#define XPCS_TX_IPG_CHECK_DIS BIT(0)
-+
- /* UNIPHY PCS RAW clock ID */
- enum {
- PCS_RAW_RX_CLK = 0,
-@@ -153,6 +174,7 @@ struct ipq_uniphy_pcs {
- struct device *dev;
- phy_interface_t interface;
- struct mutex shared_lock; /* Lock to protect shared config */
-+ spinlock_t reg_lock; /* Lock for register access */
- struct clk *clk[PCS_CLK_MAX];
- struct reset_control *reset[PCS_RESET_MAX];
- struct ipq_unipcs_raw_clk raw_clk[PCS_RAW_CLK_MAX];
-@@ -215,39 +237,55 @@ static const struct clk_ops ipq_unipcs_r
-
- static u32 ipq_unipcs_reg_read32(struct ipq_uniphy_pcs *qunipcs, u32 reg)
- {
-+ u32 val;
-+
- /* PCS use direct AHB access while XPCS use indirect AHB access */
- if (reg >= XPCS_INDIRECT_ADDR) {
-+ /* For XPCS, althrough the register is different for different
-+ * channels, but they use the same indirect AHB address to
-+ * access, so add protects here.
-+ */
-+ spin_lock(&qunipcs->reg_lock);
-+
- writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
- qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
-- return readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+ val = readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+
-+ spin_unlock(&qunipcs->reg_lock);
-+ return val;
- } else {
- return readl(qunipcs->base + reg);
- }
- }
-
--static void ipq_unipcs_reg_write32(struct ipq_uniphy_pcs *qunipcs,
-- u32 reg, u32 val)
-+static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
-+ u32 reg, u32 mask, u32 set)
- {
-+ u32 val;
-+
- if (reg >= XPCS_INDIRECT_ADDR) {
-+ spin_lock(&qunipcs->reg_lock);
-+
-+ /* XPCS read */
- writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
- qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
-+ val = readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+
-+ val &= ~mask;
-+ val |= set;
-+
-+ /* XPCS write */
- writel(val, qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
-+
-+ spin_unlock(&qunipcs->reg_lock);
- } else {
-+ val = readl(qunipcs->base + reg);
-+ val &= ~mask;
-+ val |= set;
- writel(val, qunipcs->base + reg);
- }
- }
-
--static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
-- u32 reg, u32 mask, u32 set)
--{
-- u32 val;
--
-- val = ipq_unipcs_reg_read32(qunipcs, reg);
-- val &= ~mask;
-- val |= set;
-- ipq_unipcs_reg_write32(qunipcs, reg, val);
--}
--
- static void ipq_unipcs_get_state_sgmii(struct ipq_uniphy_pcs *qunipcs,
- int channel,
- struct phylink_link_state *state)
-@@ -305,11 +343,15 @@ static void ipq_unipcs_get_state_2500bas
- }
-
- static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
- struct phylink_link_state *state)
- {
-- u32 val;
-+ u32 val, reg;
-+
-+ reg = (channel == 0) ? XPCS_MII_AN_INTR_STS :
-+ XPCS_CHANNEL_MII_AN_INTR_STS(channel);
-
-- val = ipq_unipcs_reg_read32(qunipcs, XPCS_MII_AN_INTR_STS);
-+ val = ipq_unipcs_reg_read32(qunipcs, reg);
-
- state->link = !!(val & XPCS_USXG_AN_LINK_STS);
-
-@@ -415,6 +457,15 @@ static int ipq_unipcs_config_mode(struct
- PCS_MODE_SEL_MASK,
- PCS_MODE_XPCS);
- break;
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ rate = 312500000;
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
-+ PCS_MODE_SEL_MASK,
-+ PCS_MODE_XPCS);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_QP_USXG_OPTION,
-+ PCS_QP_USXG_GMII_SRC_XPCS,
-+ PCS_QP_USXG_GMII_SRC_XPCS);
-+ break;
- default:
- dev_err(qunipcs->dev,
- "interface %s not supported\n", phy_modes(interface));
-@@ -502,35 +553,82 @@ static int ipq_unipcs_config_2500basex(s
- }
-
- static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
- unsigned int neg_mode,
- phy_interface_t interface)
- {
- int ret;
-+ u32 reg;
-+
-+ /* Only in-band autoneg mode is supported currently */
-+ if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED)
-+ return -EOPNOTSUPP;
-+
-+ if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ mutex_lock(&qunipcs->shared_lock);
-
- if (qunipcs->interface != interface) {
- ret = ipq_unipcs_config_mode(qunipcs, interface);
- if (ret)
-- return ret;
-+ goto err;
-
-- /* Deassert XPCS and configure XPCS USXGMII */
-+ /* Deassert XPCS and configure XPCS USXGMII or 10G_QXGMII */
- reset_control_deassert(qunipcs->reset[XPCS_RESET]);
-
- ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
- XPCS_USXG_EN, XPCS_USXG_EN);
-
-- if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
-- ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_AN_CTRL,
-- XPCS_MII_AN_8BIT,
-- XPCS_MII_AN_8BIT);
--
-- ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
-- XPCS_MII_AN_EN, XPCS_MII_AN_EN);
-+ if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_KR_CTRL,
-+ XPCS_USXG_MODE_MASK,
-+ XPCS_10G_QXGMII_MODE);
-+
-+ /* Set Alignment Marker Interval */
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_STS,
-+ XPCS_DIG_STS_AM_COUNT,
-+ 0x6018);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
-+ XPCS_SOFT_RESET,
-+ XPCS_SOFT_RESET);
- }
-
- qunipcs->interface = interface;
- }
-
-+ if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ mutex_unlock(&qunipcs->shared_lock);
-+
-+ /* Disable Tx IPG check for 10G_QXGMII */
-+ if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
-+ reg = (channel == 0) ? XPCS_XAUI_MODE_CTRL :
-+ XPCS_CHANNEL_XAUI_MODE_CTRL(channel);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, reg,
-+ XPCS_TX_IPG_CHECK_DIS,
-+ XPCS_TX_IPG_CHECK_DIS);
-+ }
-+
-+ /* Enable autoneg */
-+ reg = (channel == 0) ? XPCS_MII_AN_CTRL :
-+ XPCS_CHANNEL_MII_AN_CTRL(channel);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, reg,
-+ XPCS_MII_AN_8BIT, XPCS_MII_AN_8BIT);
-+
-+ reg = (channel == 0) ? XPCS_MII_CTRL :
-+ XPCS_CHANNEL_MII_CTRL(channel);
-+
-+ ipq_unipcs_reg_modify32(qunipcs, reg,
-+ XPCS_MII_AN_EN, XPCS_MII_AN_EN);
-+
- return 0;
-+
-+err:
-+ if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ mutex_unlock(&qunipcs->shared_lock);
-+
-+ return ret;
- }
-
- static int ipq_unipcs_config_10gbaser(struct ipq_uniphy_pcs *qunipcs,
-@@ -638,6 +736,7 @@ ipq_unipcs_link_up_clock_rate_set(struct
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10GBASER:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
- rate = ipq_unipcs_clock_rate_get_xgmii(speed);
- break;
- default:
-@@ -713,9 +812,10 @@ static void ipq_unipcs_link_up_config_25
- }
-
- static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
-+ int channel,
- int speed)
- {
-- u32 val;
-+ u32 val, reg;
-
- switch (speed) {
- case SPEED_10000:
-@@ -744,14 +844,20 @@ static void ipq_unipcs_link_up_config_us
- val |= XPCS_DUPLEX_FULL;
-
- /* Config XPCS speed */
-- ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
-+ reg = (channel == 0) ? XPCS_MII_CTRL : XPCS_CHANNEL_MII_CTRL(channel);
-+ ipq_unipcs_reg_modify32(qunipcs, reg,
- XPCS_SPEED_MASK | XPCS_DUPLEX_FULL,
- val);
-
- /* XPCS adapter reset */
-- ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
-- XPCS_USXG_ADPT_RESET,
-- XPCS_USXG_ADPT_RESET);
-+ if (channel == 0)
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
-+ XPCS_USXG_ADPT_RESET,
-+ XPCS_USXG_ADPT_RESET);
-+ else
-+ ipq_unipcs_reg_modify32(qunipcs, XPCS_CHANNEL_DIG_CTRL(channel),
-+ XPCS_CHANNEL_USXG_ADPT_RESET,
-+ XPCS_CHANNEL_USXG_ADPT_RESET);
- }
-
- static int ipq_unipcs_validate(struct phylink_pcs *pcs,
-@@ -786,7 +892,8 @@ static void ipq_unipcs_get_state(struct
- ipq_unipcs_get_state_2500basex(qunipcs, channel, state);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
-- ipq_unipcs_get_state_usxgmii(qunipcs, state);
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ ipq_unipcs_get_state_usxgmii(qunipcs, channel, state);
- break;
- case PHY_INTERFACE_MODE_10GBASER:
- ipq_unipcs_get_state_10gbaser(qunipcs, state);
-@@ -823,7 +930,8 @@ static int ipq_unipcs_config(struct phyl
- case PHY_INTERFACE_MODE_2500BASEX:
- return ipq_unipcs_config_2500basex(qunipcs, interface);
- case PHY_INTERFACE_MODE_USXGMII:
-- return ipq_unipcs_config_usxgmii(qunipcs,
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ return ipq_unipcs_config_usxgmii(qunipcs, channel,
- neg_mode, interface);
- case PHY_INTERFACE_MODE_10GBASER:
- return ipq_unipcs_config_10gbaser(qunipcs, interface);
-@@ -865,7 +973,8 @@ static void ipq_unipcs_link_up(struct ph
- channel, speed);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
-- ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ ipq_unipcs_link_up_config_usxgmii(qunipcs, channel, speed);
- break;
- case PHY_INTERFACE_MODE_10GBASER:
- break;
-@@ -1082,6 +1191,7 @@ static int ipq_uniphy_probe(struct platf
- return ret;
-
- mutex_init(&priv->shared_lock);
-+ spin_lock_init(&priv->reg_lock);
-
- platform_set_drvdata(pdev, priv);
-
+++ /dev/null
-From a29ee27a42fc208ef1cd99f5014d57dbfe1af3dd Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 26 Dec 2023 17:11:35 +0800
-Subject: [PATCH 18/50] net: ethernet: qualcomm: Add PPE driver for IPQ9574 SoC
-
-The PPE (Packet Process Engine) hardware block is available
-on Qualcomm IPQ SoC that support PPE architecture, such as
-IPQ9574.
-
-The PPE in IPQ9574 includes six integrated ethernet MAC
-(for 6 PPE ports), buffer management, queue management and
-scheduler functions. The MACs can connect with the external
-PHY or switch devices using the UNIPHY PCS block available
-in the SoC.
-
-The PPE also includes various packet processing offload
-capabilities such as L3 routing and L2 bridging, VLAN and
-tunnel processing offload. It also includes Ethernet DMA (EDMA)
-function for transferring packets between ARM cores and PPE
-ethernet ports.
-
-This patch adds the base source files and Makefiles for the PPE
-driver such as platform driver registration, clock initialization,
-and PPE reset routines.
-
-Change-Id: I73166b5d4bb7e3c42ec6e0ac178a75528a25ef30
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/Kconfig | 15 ++
- drivers/net/ethernet/qualcomm/Makefile | 1 +
- drivers/net/ethernet/qualcomm/ppe/Makefile | 7 +
- drivers/net/ethernet/qualcomm/ppe/ppe.c | 225 +++++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe.h | 36 ++++
- 5 files changed, 284 insertions(+)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/Makefile
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.h
-
---- a/drivers/net/ethernet/qualcomm/Kconfig
-+++ b/drivers/net/ethernet/qualcomm/Kconfig
-@@ -61,6 +61,21 @@ config QCOM_EMAC
- low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
- Precision Clock Synchronization Protocol.
-
-+config QCOM_PPE
-+ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
-+ depends on HAS_IOMEM && OF
-+ depends on COMMON_CLK
-+ select REGMAP_MMIO
-+ help
-+ This driver supports the Qualcomm Technologies, Inc. packet
-+ process engine (PPE) available with IPQ SoC. The PPE houses
-+ the ethernet MACs, Ethernet DMA (EDMA) and switch core that
-+ supports L3 flow offload, L2 switch function, RSS and tunnel
-+ offload.
-+
-+ To compile this driver as a module, choose M here. The module
-+ will be called qcom-ppe.
-+
- source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
-
- endif # NET_VENDOR_QUALCOMM
---- a/drivers/net/ethernet/qualcomm/Makefile
-+++ b/drivers/net/ethernet/qualcomm/Makefile
-@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
-
- obj-y += emac/
-
-+obj-$(CONFIG_QCOM_PPE) += ppe/
- obj-$(CONFIG_RMNET) += rmnet/
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -0,0 +1,7 @@
-+# SPDX-License-Identifier: GPL-2.0-only
-+#
-+# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
-+#
-+
-+obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
-+qcom-ppe-objs := ppe.o
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
-@@ -0,0 +1,225 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE platform device probe, DTSI parser and PPE clock initializations. */
-+
-+#include <linux/clk.h>
-+#include <linux/interconnect.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/regmap.h>
-+#include <linux/reset.h>
-+
-+#include "ppe.h"
-+
-+#define PPE_PORT_MAX 8
-+#define PPE_CLK_RATE 353000000
-+
-+/* ICC clocks for enabling PPE device. The avg and peak with value 0
-+ * will be decided by the clock rate of PPE.
-+ */
-+static const struct icc_bulk_data ppe_icc_data[] = {
-+ {
-+ .name = "ppe",
-+ .avg_bw = 0,
-+ .peak_bw = 0,
-+ },
-+ {
-+ .name = "ppe_cfg",
-+ .avg_bw = 0,
-+ .peak_bw = 0,
-+ },
-+ {
-+ .name = "qos_gen",
-+ .avg_bw = 6000,
-+ .peak_bw = 6000,
-+ },
-+ {
-+ .name = "timeout_ref",
-+ .avg_bw = 6000,
-+ .peak_bw = 6000,
-+ },
-+ {
-+ .name = "nssnoc_memnoc",
-+ .avg_bw = 533333,
-+ .peak_bw = 533333,
-+ },
-+ {
-+ .name = "memnoc_nssnoc",
-+ .avg_bw = 533333,
-+ .peak_bw = 533333,
-+ },
-+ {
-+ .name = "memnoc_nssnoc_1",
-+ .avg_bw = 533333,
-+ .peak_bw = 533333,
-+ },
-+};
-+
-+static const struct regmap_range ppe_readable_ranges[] = {
-+ regmap_reg_range(0x0, 0x1ff), /* Global */
-+ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
-+ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
-+ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
-+ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
-+ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
-+ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
-+ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
-+ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
-+ regmap_reg_range(0xf000, 0x1efff), /* IPE */
-+ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
-+ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
-+ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
-+ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
-+ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
-+ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
-+ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
-+ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
-+ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
-+ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
-+ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
-+ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
-+ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
-+ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
-+ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
-+ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
-+ regmap_reg_range(0x600000, 0x6fffff), /* BM */
-+ regmap_reg_range(0x800000, 0x9fffff), /* QM */
-+ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
-+};
-+
-+static const struct regmap_access_table ppe_reg_table = {
-+ .yes_ranges = ppe_readable_ranges,
-+ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
-+};
-+
-+static const struct regmap_config regmap_config_ipq9574 = {
-+ .reg_bits = 32,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .rd_table = &ppe_reg_table,
-+ .wr_table = &ppe_reg_table,
-+ .max_register = 0xbef800,
-+ .fast_io = true,
-+};
-+
-+static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
-+{
-+ unsigned long ppe_rate = ppe_dev->clk_rate;
-+ struct device *dev = ppe_dev->dev;
-+ struct reset_control *rstc;
-+ struct clk_bulk_data *clks;
-+ struct clk *clk;
-+ int ret, i;
-+
-+ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
-+ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
-+ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
-+ Bps_to_icc(ppe_rate);
-+ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
-+ Bps_to_icc(ppe_rate);
-+ }
-+
-+ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
-+ ppe_dev->icc_paths);
-+ if (ret)
-+ return ret;
-+
-+ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
-+ if (ret)
-+ return ret;
-+
-+ /* PPE clocks take the same clock tree, which work on the same
-+ * clock rate. Setting the clock rate of "ppe" ensures the clock
-+ * rate of all PPE clocks configured as same.
-+ */
-+ clk = devm_clk_get(dev, "ppe");
-+ if (IS_ERR(clk))
-+ return PTR_ERR(clk);
-+
-+ ret = clk_set_rate(clk, ppe_rate);
-+ if (ret)
-+ return ret;
-+
-+ ret = devm_clk_bulk_get_all_enabled(dev, &clks);
-+ if (ret < 0)
-+ return ret;
-+
-+ rstc = devm_reset_control_get_exclusive(dev, NULL);
-+ if (IS_ERR(rstc))
-+ return PTR_ERR(rstc);
-+
-+ /* Reset PPE, the delay 100ms of assert and deassert is necessary
-+ * for resetting PPE.
-+ */
-+ ret = reset_control_assert(rstc);
-+ if (ret)
-+ return ret;
-+
-+ msleep(100);
-+ ret = reset_control_deassert(rstc);
-+ if (ret)
-+ return ret;
-+
-+ msleep(100);
-+
-+ return 0;
-+}
-+
-+static int qcom_ppe_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct ppe_device *ppe_dev;
-+ void __iomem *base;
-+ int ret, num_icc;
-+
-+ num_icc = ARRAY_SIZE(ppe_icc_data);
-+ ppe_dev = devm_kzalloc(dev,
-+ struct_size(ppe_dev, icc_paths, num_icc),
-+ GFP_KERNEL);
-+ if (!ppe_dev)
-+ return dev_err_probe(dev, -ENOMEM, "PPE alloc memory failed\n");
-+
-+ base = devm_platform_ioremap_resource(pdev, 0);
-+ if (IS_ERR(base))
-+ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
-+
-+ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, ®map_config_ipq9574);
-+ if (IS_ERR(ppe_dev->regmap))
-+ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
-+ "PPE initialize regmap failed\n");
-+ ppe_dev->dev = dev;
-+ ppe_dev->clk_rate = PPE_CLK_RATE;
-+ ppe_dev->num_ports = PPE_PORT_MAX;
-+ ppe_dev->num_icc_paths = num_icc;
-+
-+ ret = ppe_clock_init_and_reset(ppe_dev);
-+ if (ret)
-+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
-+
-+ platform_set_drvdata(pdev, ppe_dev);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id qcom_ppe_of_match[] = {
-+ { .compatible = "qcom,ipq9574-ppe" },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
-+
-+static struct platform_driver qcom_ppe_driver = {
-+ .driver = {
-+ .name = "qcom_ppe",
-+ .of_match_table = qcom_ppe_of_match,
-+ },
-+ .probe = qcom_ppe_probe,
-+};
-+module_platform_driver(qcom_ppe_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Qualcomm IPQ PPE driver");
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
-@@ -0,0 +1,36 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __PPE_H__
-+#define __PPE_H__
-+
-+#include <linux/compiler.h>
-+#include <linux/interconnect.h>
-+
-+struct device;
-+struct regmap;
-+
-+/**
-+ * struct ppe_device - PPE device private data.
-+ * @dev: PPE device structure.
-+ * @regmap: PPE register map.
-+ * @clk_rate: PPE clock rate.
-+ * @num_ports: Number of PPE ports.
-+ * @num_icc_paths: Number of interconnect paths.
-+ * @icc_paths: Interconnect path array.
-+ *
-+ * PPE device is the instance of PPE hardware, which is used to
-+ * configure PPE packet process modules such as BM (buffer management),
-+ * QM (queue management), and scheduler.
-+ */
-+struct ppe_device {
-+ struct device *dev;
-+ struct regmap *regmap;
-+ unsigned long clk_rate;
-+ unsigned int num_ports;
-+ unsigned int num_icc_paths;
-+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
-+};
-+#endif
+++ /dev/null
-From 049820d8a0c918cedd4524eda9abf750819ac901 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 26 Dec 2023 18:19:30 +0800
-Subject: [PATCH 19/50] net: ethernet: qualcomm: Add PPE buffer manager
- configuration
-
-The BM (Buffer Management) config controls the pause frame generated
-on the PPE port. There are maximum 15 BM ports and 4 groups supported,
-all BM ports are assigned to group 0 by default. The number of hardware
-buffers configured for the port influence the threshold of the flow
-control for that port.
-
-Change-Id: Ifb1b69c89966cf5cab19f8e2661c64a4dc6230fe
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/ppe.c | 5 +
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 181 ++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 10 +
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 54 ++++++
- 5 files changed, 251 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.h
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -4,4 +4,4 @@
- #
-
- obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
--qcom-ppe-objs := ppe.o
-+qcom-ppe-objs := ppe.o ppe_config.o
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
-@@ -15,6 +15,7 @@
- #include <linux/reset.h>
-
- #include "ppe.h"
-+#include "ppe_config.h"
-
- #define PPE_PORT_MAX 8
- #define PPE_CLK_RATE 353000000
-@@ -201,6 +202,10 @@ static int qcom_ppe_probe(struct platfor
- if (ret)
- return dev_err_probe(dev, ret, "PPE clock config failed\n");
-
-+ ret = ppe_hw_config(ppe_dev);
-+ if (ret)
-+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
-+
- platform_set_drvdata(pdev, ppe_dev);
-
- return 0;
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -0,0 +1,181 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE HW initialization configs such as BM(buffer management),
-+ * QM(queue management) and scheduler configs.
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/bits.h>
-+#include <linux/device.h>
-+#include <linux/regmap.h>
-+
-+#include "ppe.h"
-+#include "ppe_config.h"
-+#include "ppe_regs.h"
-+
-+/**
-+ * struct ppe_bm_port_config - PPE BM port configuration.
-+ * @port_id_start: The fist BM port ID to configure.
-+ * @port_id_end: The last BM port ID to configure.
-+ * @pre_alloc: BM port dedicated buffer number.
-+ * @in_fly_buf: Buffer number for receiving the packet after pause frame sent.
-+ * @ceil: Ceil to generate the back pressure.
-+ * @weight: Weight value.
-+ * @resume_offset: Resume offset from the threshold value.
-+ * @resume_ceil: Ceil to resume from the back pressure state.
-+ * @dynamic: Dynamic threshold used or not.
-+ *
-+ * The is for configuring the threshold that impacts the port
-+ * flow control.
-+ */
-+struct ppe_bm_port_config {
-+ unsigned int port_id_start;
-+ unsigned int port_id_end;
-+ unsigned int pre_alloc;
-+ unsigned int in_fly_buf;
-+ unsigned int ceil;
-+ unsigned int weight;
-+ unsigned int resume_offset;
-+ unsigned int resume_ceil;
-+ bool dynamic;
-+};
-+
-+static int ipq9574_ppe_bm_group_config = 1550;
-+static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
-+ {
-+ .port_id_start = 0,
-+ .port_id_end = 0,
-+ .pre_alloc = 0,
-+ .in_fly_buf = 100,
-+ .ceil = 1146,
-+ .weight = 7,
-+ .resume_offset = 8,
-+ .resume_ceil = 0,
-+ .dynamic = true,
-+ },
-+ {
-+ .port_id_start = 1,
-+ .port_id_end = 7,
-+ .pre_alloc = 0,
-+ .in_fly_buf = 100,
-+ .ceil = 250,
-+ .weight = 4,
-+ .resume_offset = 36,
-+ .resume_ceil = 0,
-+ .dynamic = true,
-+ },
-+ {
-+ .port_id_start = 8,
-+ .port_id_end = 13,
-+ .pre_alloc = 0,
-+ .in_fly_buf = 128,
-+ .ceil = 250,
-+ .weight = 4,
-+ .resume_offset = 36,
-+ .resume_ceil = 0,
-+ .dynamic = true,
-+ },
-+ {
-+ .port_id_start = 14,
-+ .port_id_end = 14,
-+ .pre_alloc = 0,
-+ .in_fly_buf = 40,
-+ .ceil = 250,
-+ .weight = 4,
-+ .resume_offset = 36,
-+ .resume_ceil = 0,
-+ .dynamic = true,
-+ },
-+};
-+
-+static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
-+ struct ppe_bm_port_config port_cfg)
-+{
-+ u32 reg, val, bm_fc_val[2];
-+ int ret;
-+
-+ /* Configure BM flow control related threshold */
-+ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
-+ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
-+ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
-+ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
-+ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
-+ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
-+
-+ /* Ceiling is divided into the different register word. */
-+ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
-+ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
-+ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
-+ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
-+
-+ reg = PPE_BM_PORT_FC_CFG_ADDR + PPE_BM_PORT_FC_CFG_INC * bm_port_id;
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
-+ if (ret)
-+ return ret;
-+
-+ /* Assign the default group ID 0 to the BM port */
-+ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
-+ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
-+ val);
-+ if (ret)
-+ return ret;
-+
-+ /* Enable BM port flow control */
-+ val = FIELD_PREP(PPE_BM_PORT_FC_MODE_EN, true);
-+ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
-+
-+ return regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_BM_PORT_FC_MODE_EN,
-+ val);
-+}
-+
-+/* Configure the buffer threshold for the port flow control function. */
-+static int ppe_config_bm(struct ppe_device *ppe_dev)
-+{
-+ unsigned int i, bm_port_id, port_cfg_cnt;
-+ struct ppe_bm_port_config *port_cfg;
-+ u32 reg, val;
-+ int ret;
-+
-+ /* Configure the buffer number of group 0 by default. The buffer
-+ * number of group 1-3 is cleared to 0 after PPE reset on the probe
-+ * of PPE driver.
-+ */
-+ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
-+ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
-+ ipq9574_ppe_bm_group_config);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
-+ val);
-+ if (ret)
-+ goto bm_config_fail;
-+
-+ port_cfg = ipq9574_ppe_bm_port_config;
-+ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
-+ for (i = 0; i < port_cfg_cnt; i++) {
-+ for (bm_port_id = port_cfg[i].port_id_start;
-+ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
-+ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
-+ port_cfg[i]);
-+ if (ret)
-+ goto bm_config_fail;
-+ }
-+ }
-+
-+ return 0;
-+
-+bm_config_fail:
-+ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
-+ return ret;
-+}
-+
-+int ppe_hw_config(struct ppe_device *ppe_dev)
-+{
-+ return ppe_config_bm(ppe_dev);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -0,0 +1,10 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __PPE_CONFIG_H__
-+#define __PPE_CONFIG_H__
-+
-+int ppe_hw_config(struct ppe_device *ppe_dev);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -0,0 +1,54 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE hardware register and table declarations. */
-+#ifndef __PPE_REGS_H__
-+#define __PPE_REGS_H__
-+
-+/* There are 15 BM ports and 4 BM groups supported by PPE,
-+ * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
-+ * to PPE physical port 1-6, BM port 14 is matched to EIP.
-+ */
-+#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
-+#define PPE_BM_PORT_FC_MODE_INC 0x4
-+#define PPE_BM_PORT_FC_MODE_EN BIT(0)
-+
-+#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
-+#define PPE_BM_PORT_GROUP_ID_INC 0x4
-+#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
-+
-+#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
-+#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
-+#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
-+
-+#define PPE_BM_PORT_FC_CFG_ADDR 0x601000
-+#define PPE_BM_PORT_FC_CFG_INC 0x10
-+#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
-+#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
-+#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
-+#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
-+#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
-+#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
-+#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
-+#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
-+
-+#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_REACT_LIMIT)
-+#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_THRESHOLD)
-+#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_OFFSET)
-+#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_CEILING_LOW)
-+#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_CEILING_HIGH)
-+#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_WEIGHT)
-+#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_DYNAMIC)
-+#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
-+
-+#endif
+++ /dev/null
-From 12a50075552d0e2ada65c039e5a09ca50421f152 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 26 Dec 2023 19:34:49 +0800
-Subject: [PATCH 20/50] net: ethernet: qualcomm: Add PPE queue management
- config
-
-QM (queue management) config decides the length of PPE port queues
-and the threshold to drop packet.
-
-There are two types of PPE queue, unicast queue (0-255) and multicast
-queue (256-299) are configured with different length, which are used
-to forward the different types of traffic.
-
-Change-Id: I74ffcb6a39618ca8f585b5204d483fb45edecba8
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 176 +++++++++++++++++-
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 82 ++++++++
- 2 files changed, 257 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -43,6 +43,27 @@ struct ppe_bm_port_config {
- bool dynamic;
- };
-
-+/**
-+ * struct ppe_qm_queue_config - PPE queue config.
-+ * @queue_start: PPE start of queue ID.
-+ * @queue_end: PPE end of queue ID.
-+ * @prealloc_buf: Queue dedicated buffer number.
-+ * @ceil: Ceil to start drop packet from queue.
-+ * @weight: Weight value.
-+ * @resume_offset: Resume offset from the threshold.
-+ * @dynamic: Threshold value is decided dynamically or statically.
-+ *
-+ */
-+struct ppe_qm_queue_config {
-+ unsigned int queue_start;
-+ unsigned int queue_end;
-+ unsigned int prealloc_buf;
-+ unsigned int ceil;
-+ unsigned int weight;
-+ unsigned int resume_offset;
-+ bool dynamic;
-+};
-+
- static int ipq9574_ppe_bm_group_config = 1550;
- static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
- {
-@@ -91,6 +112,31 @@ static struct ppe_bm_port_config ipq9574
- },
- };
-
-+/* Default QM group settings for IPQ9754. */
-+static int ipq9574_ppe_qm_group_config = 2000;
-+
-+/* Default QM settings for unicast and multicast queues for IPQ9754. */
-+static struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
-+ {
-+ .queue_start = 0,
-+ .queue_end = 255,
-+ .prealloc_buf = 0,
-+ .ceil = 400,
-+ .weight = 4,
-+ .resume_offset = 36,
-+ .dynamic = true,
-+ },
-+ {
-+ .queue_start = 256,
-+ .queue_end = 299,
-+ .prealloc_buf = 0,
-+ .ceil = 250,
-+ .weight = 0,
-+ .resume_offset = 36,
-+ .dynamic = false,
-+ },
-+};
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -175,7 +221,135 @@ bm_config_fail:
- return ret;
- }
-
-+/* Configure PPE hardware queue depth, which is decided by the threshold
-+ * of queue.
-+ */
-+static int ppe_config_qm(struct ppe_device *ppe_dev)
-+{
-+ struct ppe_qm_queue_config *queue_cfg;
-+ int ret, i, queue_id, queue_cfg_count;
-+ u32 reg, multicast_queue_cfg[5];
-+ u32 unicast_queue_cfg[4];
-+ u32 group_cfg[3];
-+
-+ /* Assign the buffer number to the group 0 by default. */
-+ reg = PPE_AC_GRP_CFG_TBL_ADDR;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ group_cfg, ARRAY_SIZE(group_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ group_cfg, ARRAY_SIZE(group_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ queue_cfg = ipq9574_ppe_qm_queue_config;
-+ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
-+ for (i = 0; i < queue_cfg_count; i++) {
-+ queue_id = queue_cfg[i].queue_start;
-+
-+ /* Configure threshold for dropping packet from unicast queue
-+ * and multicast queue, which belong to the different queue ID.
-+ */
-+ while (queue_id <= queue_cfg[i].queue_end) {
-+ if (queue_id < PPE_AC_UNI_QUEUE_CFG_TBL_NUM) {
-+ reg = PPE_AC_UNI_QUEUE_CFG_TBL_ADDR +
-+ PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue_id;
-+
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ unicast_queue_cfg,
-+ ARRAY_SIZE(unicast_queue_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ PPE_AC_UNI_QUEUE_SET_EN(unicast_queue_cfg, true);
-+ PPE_AC_UNI_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
-+ PPE_AC_UNI_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
-+ queue_cfg[i].prealloc_buf);
-+ PPE_AC_UNI_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
-+ queue_cfg[i].dynamic);
-+ PPE_AC_UNI_QUEUE_SET_WEIGHT(unicast_queue_cfg,
-+ queue_cfg[i].weight);
-+ PPE_AC_UNI_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
-+ queue_cfg[i].ceil);
-+ PPE_AC_UNI_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
-+ queue_cfg[i].resume_offset);
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ unicast_queue_cfg,
-+ ARRAY_SIZE(unicast_queue_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+ } else {
-+ reg = PPE_AC_MUL_QUEUE_CFG_TBL_ADDR +
-+ PPE_AC_MUL_QUEUE_CFG_TBL_INC * queue_id;
-+
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ multicast_queue_cfg,
-+ ARRAY_SIZE(multicast_queue_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ PPE_AC_MUL_QUEUE_SET_EN(multicast_queue_cfg, true);
-+ PPE_AC_MUL_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
-+ PPE_AC_MUL_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
-+ queue_cfg[i].prealloc_buf);
-+ PPE_AC_MUL_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
-+ queue_cfg[i].ceil);
-+ PPE_AC_MUL_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
-+ queue_cfg[i].resume_offset);
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ multicast_queue_cfg,
-+ ARRAY_SIZE(multicast_queue_cfg));
-+ if (ret)
-+ goto qm_config_fail;
-+ }
-+
-+ /* Enable enqueue */
-+ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_ENQ_OPR_TBL_ENQ_DISABLE,
-+ FIELD_PREP(PPE_ENQ_OPR_TBL_ENQ_DISABLE, false));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ /* Enable dequeue */
-+ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_DEQ_OPR_TBL_DEQ_DISABLE,
-+ FIELD_PREP(PPE_ENQ_OPR_TBL_ENQ_DISABLE, false));
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ queue_id++;
-+ }
-+ }
-+
-+ /* Enable queue counter for all PPE hardware queues. */
-+ ret = regmap_update_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
-+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN,
-+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
-+ if (ret)
-+ goto qm_config_fail;
-+
-+ return 0;
-+
-+qm_config_fail:
-+ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
-+ return ret;
-+}
-+
- int ppe_hw_config(struct ppe_device *ppe_dev)
- {
-- return ppe_config_bm(ppe_dev);
-+ int ret;
-+
-+ ret = ppe_config_bm(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_config_qm(ppe_dev);
- }
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -11,6 +11,14 @@
- * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
- * to PPE physical port 1-6, BM port 14 is matched to EIP.
- */
-+#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
-+#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
-+
-+#define PPE_DEQ_OPR_TBL_ADDR 0x430000
-+#define PPE_DEQ_OPR_TBL_NUM 300
-+#define PPE_DEQ_OPR_TBL_INC 0x10
-+#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
-+
- #define PPE_BM_PORT_FC_MODE_ADDR 0x600100
- #define PPE_BM_PORT_FC_MODE_INC 0x4
- #define PPE_BM_PORT_FC_MODE_EN BIT(0)
-@@ -51,4 +59,78 @@
- #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
-
-+/* PPE unicast queue (0-255) configurations. */
-+#define PPE_AC_UNI_QUEUE_CFG_TBL_ADDR 0x848000
-+#define PPE_AC_UNI_QUEUE_CFG_TBL_NUM 256
-+#define PPE_AC_UNI_QUEUE_CFG_TBL_INC 0x10
-+#define PPE_AC_UNI_QUEUE_CFG_W0_EN BIT(0)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_WRED_EN BIT(1)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_FC_EN BIT(2)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_COLOR_AWARE BIT(3)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_DYNAMIC BIT(17)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
-+#define PPE_AC_UNI_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
-+#define PPE_AC_UNI_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
-+
-+#define PPE_AC_UNI_QUEUE_SET_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_EN)
-+#define PPE_AC_UNI_QUEUE_SET_GRP_ID(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_GRP_ID)
-+#define PPE_AC_UNI_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_PRE_LIMIT)
-+#define PPE_AC_UNI_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_DYNAMIC)
-+#define PPE_AC_UNI_QUEUE_SET_WEIGHT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_WEIGHT)
-+#define PPE_AC_UNI_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_THRESHOLD)
-+#define PPE_AC_UNI_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x3, value, PPE_AC_UNI_QUEUE_CFG_W3_GRN_RESUME)
-+
-+/* PPE multicast queue (256-299) configurations. */
-+#define PPE_AC_MUL_QUEUE_CFG_TBL_ADDR 0x84a000
-+#define PPE_AC_MUL_QUEUE_CFG_TBL_NUM 44
-+#define PPE_AC_MUL_QUEUE_CFG_TBL_INC 0x10
-+#define PPE_AC_MUL_QUEUE_CFG_W0_EN BIT(0)
-+#define PPE_AC_MUL_QUEUE_CFG_W0_FC_EN BIT(1)
-+#define PPE_AC_MUL_QUEUE_CFG_W0_COLOR_AWARE BIT(2)
-+#define PPE_AC_MUL_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
-+#define PPE_AC_MUL_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
-+#define PPE_AC_MUL_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
-+#define PPE_AC_MUL_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
-+
-+#define PPE_AC_MUL_QUEUE_SET_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_EN)
-+#define PPE_AC_MUL_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_GRP_ID)
-+#define PPE_AC_MUL_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_PRE_LIMIT)
-+#define PPE_AC_MUL_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_THRESHOLD)
-+#define PPE_AC_MUL_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x2, value, PPE_AC_MUL_QUEUE_CFG_W2_RESUME)
-+
-+/* PPE admission control group (0-3) configurations */
-+#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
-+#define PPE_AC_GRP_CFG_TBL_NUM 0x4
-+#define PPE_AC_GRP_CFG_TBL_INC 0x10
-+#define PPE_AC_GRP_W0_AC_EN BIT(0)
-+#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
-+#define PPE_AC_GRP_W0_COLOR_AWARE BIT(2)
-+#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
-+#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
-+#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
-+#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
-+#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
-+
-+#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
-+
-+#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
-+#define PPE_ENQ_OPR_TBL_NUM 300
-+#define PPE_ENQ_OPR_TBL_INC 0x10
-+#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
-+
- #endif
+++ /dev/null
-From 8ae6ba538521693c4df0675a2f6a45f92daedb80 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 26 Dec 2023 20:18:09 +0800
-Subject: [PATCH 21/50] net: ethernet: qualcomm: Add PPE scheduler config
-
-PPE scheduler config determines the priority of scheduling the
-packet. The scheduler config is used for supporting the QoS
-offload in PPE hardware.
-
-Change-Id: I4811bd133074757371775a6a69a1cc3cfaa8d0d0
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/ppe_api.c | 40 +
- drivers/net/ethernet/qualcomm/ppe/ppe_api.h | 21 +
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 884 +++++++++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 26 +
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 94 ++
- 6 files changed, 1064 insertions(+), 3 deletions(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_api.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -4,4 +4,4 @@
- #
-
- obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
--qcom-ppe-objs := ppe.o ppe_config.o
-+qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
-@@ -0,0 +1,40 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#include "ppe.h"
-+#include "ppe_api.h"
-+#include "ppe_config.h"
-+
-+/**
-+ * ppe_queue_priority_set - set scheduler priority of PPE hardware queue
-+ * @ppe_dev: PPE device
-+ * @node_id: PPE hardware node ID, which is either queue ID or flow ID
-+ * @priority: Qos scheduler priority
-+ *
-+ * Configure scheduler priority of PPE hardware queque, the maximum node
-+ * ID supported is PPE_QUEUE_ID_NUM added by PPE_FLOW_ID_NUM, queue ID
-+ * belongs to level 0, flow ID belongs to level 1 in the packet pipeline.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_priority_set(struct ppe_device *ppe_dev,
-+ int node_id, int priority)
-+{
-+ struct ppe_qos_scheduler_cfg sch_cfg;
-+ int ret, port, level = 0;
-+
-+ if (node_id >= PPE_QUEUE_ID_NUM) {
-+ level = 1;
-+ node_id -= PPE_QUEUE_ID_NUM;
-+ }
-+
-+ ret = ppe_queue_scheduler_get(ppe_dev, node_id, level, &port, &sch_cfg);
-+ if (ret)
-+ return ret;
-+
-+ sch_cfg.pri = priority;
-+
-+ return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-@@ -0,0 +1,21 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* These may also be used by higher level network drivers such as ethernet or
-+ * QoS drivers.
-+ */
-+
-+#ifndef __PPE_API_H__
-+#define __PPE_API_H__
-+
-+#include "ppe.h"
-+
-+#define PPE_QUEUE_ID_NUM 300
-+#define PPE_FLOW_ID_NUM 64
-+#define PPE_QUEUE_SCH_PRI_NUM 8
-+
-+int ppe_queue_priority_set(struct ppe_device *ppe_dev,
-+ int queue_id, int priority);
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -13,6 +13,7 @@
- #include <linux/regmap.h>
-
- #include "ppe.h"
-+#include "ppe_api.h"
- #include "ppe_config.h"
- #include "ppe_regs.h"
-
-@@ -52,7 +53,6 @@ struct ppe_bm_port_config {
- * @weight: Weight value.
- * @resume_offset: Resume offset from the threshold.
- * @dynamic: Threshold value is decided dynamically or statically.
-- *
- */
- struct ppe_qm_queue_config {
- unsigned int queue_start;
-@@ -64,6 +64,61 @@ struct ppe_qm_queue_config {
- bool dynamic;
- };
-
-+/**
-+ * struct ppe_sch_bm_config - PPE arbitration for buffer config.
-+ * @valid: Arbitration entry valid or not.
-+ * @is_egress: Arbitration entry for egress or not.
-+ * @port: Port ID to use arbitration entry.
-+ * @second_valid: Second port valid or not.
-+ * @second_port: Second port to use.
-+ */
-+struct ppe_sch_bm_config {
-+ bool valid;
-+ bool is_egress;
-+ unsigned int port;
-+ bool second_valid;
-+ unsigned int second_port;
-+};
-+
-+/**
-+ * struct ppe_sch_schedule_config - PPE arbitration for scheduler config.
-+ * @ensch_port_bmp: Port bit map for enqueue scheduler.
-+ * @ensch_port: Port ID to enqueue scheduler.
-+ * @desch_port: Port ID to dequeue scheduler.
-+ * @desch_second_valid: Dequeue for the second port valid or not.
-+ * @desch_second_port: Second port ID to dequeue scheduler.
-+ */
-+struct ppe_sch_schedule_config {
-+ unsigned int ensch_port_bmp;
-+ unsigned int ensch_port;
-+ unsigned int desch_port;
-+ bool desch_second_valid;
-+ unsigned int desch_second_port;
-+};
-+
-+/**
-+ * struct ppe_port_schedule_config - PPE port scheduler config.
-+ * @port: Port ID to be scheduled.
-+ * @flow_level: Scheduler flow level or not.
-+ * @node_id: Node ID, for level 0, queue ID is used.
-+ * @loop_num: Loop number of scheduler config.
-+ * @pri_max: Max priority configured.
-+ * @flow_id: Strict priority ID.
-+ * @drr_node_id: Node ID for scheduler.
-+ *
-+ * PPE scheduler config, which decides the packet scheduler priority
-+ * from egress port.
-+ */
-+struct ppe_port_schedule_config {
-+ unsigned int port;
-+ bool flow_level;
-+ unsigned int node_id;
-+ unsigned int loop_num;
-+ unsigned int pri_max;
-+ unsigned int flow_id;
-+ unsigned int drr_node_id;
-+};
-+
- static int ipq9574_ppe_bm_group_config = 1550;
- static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
- {
-@@ -137,6 +192,707 @@ static struct ppe_qm_queue_config ipq957
- },
- };
-
-+static struct ppe_sch_bm_config ipq9574_ppe_sch_bm_config[] = {
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 7, 0, 0},
-+ {1, 1, 7, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 2, 0, 0},
-+ {1, 1, 2, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 3, 0, 0},
-+ {1, 1, 3, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 7, 0, 0},
-+ {1, 1, 7, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 4, 0, 0},
-+ {1, 1, 4, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 2, 0, 0},
-+ {1, 1, 2, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 7, 0, 0},
-+ {1, 1, 7, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 3, 0, 0},
-+ {1, 1, 3, 0, 0},
-+ {1, 0, 1, 0, 0},
-+ {1, 1, 1, 0, 0},
-+ {1, 0, 0, 0, 0},
-+ {1, 1, 0, 0, 0},
-+ {1, 0, 5, 0, 0},
-+ {1, 1, 5, 0, 0},
-+ {1, 0, 6, 0, 0},
-+ {1, 1, 6, 0, 0},
-+ {1, 0, 4, 0, 0},
-+ {1, 1, 4, 0, 0},
-+ {1, 0, 7, 0, 0},
-+ {1, 1, 7, 0, 0},
-+};
-+
-+static struct ppe_sch_schedule_config ipq9574_ppe_sch_schedule_config[] = {
-+ {0x98, 6, 0, 1, 1},
-+ {0x94, 5, 6, 1, 3},
-+ {0x86, 0, 5, 1, 4},
-+ {0x8C, 1, 6, 1, 0},
-+ {0x1C, 7, 5, 1, 1},
-+ {0x98, 2, 6, 1, 0},
-+ {0x1C, 5, 7, 1, 1},
-+ {0x34, 3, 6, 1, 0},
-+ {0x8C, 4, 5, 1, 1},
-+ {0x98, 2, 6, 1, 0},
-+ {0x8C, 5, 4, 1, 1},
-+ {0xA8, 0, 6, 1, 2},
-+ {0x98, 5, 1, 1, 0},
-+ {0x98, 6, 5, 1, 2},
-+ {0x89, 1, 6, 1, 4},
-+ {0xA4, 3, 0, 1, 1},
-+ {0x8C, 5, 6, 1, 4},
-+ {0xA8, 0, 2, 1, 1},
-+ {0x98, 6, 5, 1, 0},
-+ {0xC4, 4, 3, 1, 1},
-+ {0x94, 6, 5, 1, 0},
-+ {0x1C, 7, 6, 1, 1},
-+ {0x98, 2, 5, 1, 0},
-+ {0x1C, 6, 7, 1, 1},
-+ {0x1C, 5, 6, 1, 0},
-+ {0x94, 3, 5, 1, 1},
-+ {0x8C, 4, 6, 1, 0},
-+ {0x94, 1, 5, 1, 3},
-+ {0x94, 6, 1, 1, 0},
-+ {0xD0, 3, 5, 1, 2},
-+ {0x98, 6, 0, 1, 1},
-+ {0x94, 5, 6, 1, 3},
-+ {0x94, 1, 5, 1, 0},
-+ {0x98, 2, 6, 1, 1},
-+ {0x8C, 4, 5, 1, 0},
-+ {0x1C, 7, 6, 1, 1},
-+ {0x8C, 0, 5, 1, 4},
-+ {0x89, 1, 6, 1, 2},
-+ {0x98, 5, 0, 1, 1},
-+ {0x94, 6, 5, 1, 3},
-+ {0x92, 0, 6, 1, 2},
-+ {0x98, 1, 5, 1, 0},
-+ {0x98, 6, 2, 1, 1},
-+ {0xD0, 0, 5, 1, 3},
-+ {0x94, 6, 0, 1, 1},
-+ {0x8C, 5, 6, 1, 4},
-+ {0x8C, 1, 5, 1, 0},
-+ {0x1C, 6, 7, 1, 1},
-+ {0x1C, 5, 6, 1, 0},
-+ {0xB0, 2, 3, 1, 1},
-+ {0xC4, 4, 5, 1, 0},
-+ {0x8C, 6, 4, 1, 1},
-+ {0xA4, 3, 6, 1, 0},
-+ {0x1C, 5, 7, 1, 1},
-+ {0x4C, 0, 5, 1, 4},
-+ {0x8C, 6, 0, 1, 1},
-+ {0x34, 7, 6, 1, 3},
-+ {0x94, 5, 0, 1, 1},
-+ {0x98, 6, 5, 1, 2},
-+};
-+
-+static struct ppe_port_schedule_config ppe_qos_schedule_config[] = {
-+ {
-+ .port = 0,
-+ .flow_level = true,
-+ .node_id = 0,
-+ .loop_num = 1,
-+ .pri_max = 1,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 0,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 8,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 16,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 24,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 32,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 40,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 48,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 56,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 256,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 0,
-+ .flow_level = false,
-+ .node_id = 264,
-+ .loop_num = 8,
-+ .pri_max = 8,
-+ .flow_id = 0,
-+ .drr_node_id = 0,
-+ },
-+ {
-+ .port = 1,
-+ .flow_level = true,
-+ .node_id = 36,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 1,
-+ .drr_node_id = 8,
-+ },
-+ {
-+ .port = 1,
-+ .flow_level = false,
-+ .node_id = 144,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 36,
-+ .drr_node_id = 48,
-+ },
-+ {
-+ .port = 1,
-+ .flow_level = false,
-+ .node_id = 272,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 36,
-+ .drr_node_id = 48,
-+ },
-+ {
-+ .port = 2,
-+ .flow_level = true,
-+ .node_id = 40,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 2,
-+ .drr_node_id = 12,
-+ },
-+ {
-+ .port = 2,
-+ .flow_level = false,
-+ .node_id = 160,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 40,
-+ .drr_node_id = 64,
-+ },
-+ {
-+ .port = 2,
-+ .flow_level = false,
-+ .node_id = 276,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 40,
-+ .drr_node_id = 64,
-+ },
-+ {
-+ .port = 3,
-+ .flow_level = true,
-+ .node_id = 44,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 3,
-+ .drr_node_id = 16,
-+ },
-+ {
-+ .port = 3,
-+ .flow_level = false,
-+ .node_id = 176,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 44,
-+ .drr_node_id = 80,
-+ },
-+ {
-+ .port = 3,
-+ .flow_level = false,
-+ .node_id = 280,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 44,
-+ .drr_node_id = 80,
-+ },
-+ {
-+ .port = 4,
-+ .flow_level = true,
-+ .node_id = 48,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 4,
-+ .drr_node_id = 20,
-+ },
-+ {
-+ .port = 4,
-+ .flow_level = false,
-+ .node_id = 192,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 48,
-+ .drr_node_id = 96,
-+ },
-+ {
-+ .port = 4,
-+ .flow_level = false,
-+ .node_id = 284,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 48,
-+ .drr_node_id = 96,
-+ },
-+ {
-+ .port = 5,
-+ .flow_level = true,
-+ .node_id = 52,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 5,
-+ .drr_node_id = 24,
-+ },
-+ {
-+ .port = 5,
-+ .flow_level = false,
-+ .node_id = 208,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 52,
-+ .drr_node_id = 112,
-+ },
-+ {
-+ .port = 5,
-+ .flow_level = false,
-+ .node_id = 288,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 52,
-+ .drr_node_id = 112,
-+ },
-+ {
-+ .port = 6,
-+ .flow_level = true,
-+ .node_id = 56,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 6,
-+ .drr_node_id = 28,
-+ },
-+ {
-+ .port = 6,
-+ .flow_level = false,
-+ .node_id = 224,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 56,
-+ .drr_node_id = 128,
-+ },
-+ {
-+ .port = 6,
-+ .flow_level = false,
-+ .node_id = 292,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 56,
-+ .drr_node_id = 128,
-+ },
-+ {
-+ .port = 7,
-+ .flow_level = true,
-+ .node_id = 60,
-+ .loop_num = 2,
-+ .pri_max = 0,
-+ .flow_id = 7,
-+ .drr_node_id = 32,
-+ },
-+ {
-+ .port = 7,
-+ .flow_level = false,
-+ .node_id = 240,
-+ .loop_num = 16,
-+ .pri_max = 8,
-+ .flow_id = 60,
-+ .drr_node_id = 144,
-+ },
-+ {
-+ .port = 7,
-+ .flow_level = false,
-+ .node_id = 296,
-+ .loop_num = 4,
-+ .pri_max = 4,
-+ .flow_id = 60,
-+ .drr_node_id = 144,
-+ },
-+};
-+
-+/* Set the first level scheduler configuration. */
-+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
-+ int node_id, int port,
-+ struct ppe_qos_scheduler_cfg scheduler_cfg)
-+{
-+ u32 val, reg;
-+ int ret;
-+
-+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
-+ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
-+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
-+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
-+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
-+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
-+ PPE_L0_C_FLOW_CFG_TBL_INC;
-+ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
-+ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
-+ PPE_L0_E_FLOW_CFG_TBL_INC;
-+ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
-+ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
-+ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
-+ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.node_frame_mode);
-+
-+ return regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
-+ val);
-+}
-+
-+/* Get the first level scheduler configuration. */
-+static int ppe_scheduler_l0_queue_map_get(struct ppe_device *ppe_dev,
-+ int node_id, int *port,
-+ struct ppe_qos_scheduler_cfg *scheduler_cfg)
-+{
-+ u32 val, reg;
-+ int ret;
-+
-+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->flow_id = FIELD_GET(PPE_L0_FLOW_MAP_TBL_FLOW_ID, val);
-+ scheduler_cfg->pri = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_PRI, val);
-+ scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, val);
-+
-+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
-+ PPE_L0_C_FLOW_CFG_TBL_INC;
-+
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->drr_node_id = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, val);
-+ scheduler_cfg->node_unit = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
-+
-+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ *port = FIELD_GET(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, val);
-+
-+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->node_frame_mode = FIELD_GET(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, val);
-+
-+ return 0;
-+}
-+
-+/* Set the second level scheduler configuration. */
-+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
-+ int node_id, int port,
-+ struct ppe_qos_scheduler_cfg scheduler_cfg)
-+{
-+ u32 val, reg;
-+ int ret;
-+
-+ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
-+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
-+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
-+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
-+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
-+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
-+ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
-+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
-+ PPE_L1_C_FLOW_CFG_TBL_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
-+ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
-+ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
-+ PPE_L1_E_FLOW_CFG_TBL_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
-+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
-+ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.node_frame_mode);
-+
-+ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
-+}
-+
-+/* Get the second level scheduler configuration. */
-+static int ppe_scheduler_l1_queue_map_get(struct ppe_device *ppe_dev,
-+ int node_id, int *port,
-+ struct ppe_qos_scheduler_cfg *scheduler_cfg)
-+{
-+ u32 val, reg;
-+ int ret;
-+
-+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->flow_id = FIELD_GET(PPE_L1_FLOW_MAP_TBL_FLOW_ID, val);
-+ scheduler_cfg->pri = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_PRI, val);
-+ scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, val);
-+
-+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
-+ (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
-+ PPE_L1_C_FLOW_CFG_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->drr_node_id = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, val);
-+ scheduler_cfg->node_unit = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
-+
-+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ *port = FIELD_GET(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, val);
-+
-+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ scheduler_cfg->node_frame_mode = FIELD_GET(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
-+
-+ return 0;
-+}
-+
-+/**
-+ * ppe_queue_scheduler_set - set QoS scheduler of PPE hardware queue
-+ * @ppe_dev: PPE device
-+ * @node_id: PPE node ID
-+ * @flow_level: Flow level scheduler or queue level scheduler
-+ * @port: PPE port ID set scheduler configuration
-+ * @scheduler_cfg: QoS scheduler configuration
-+ *
-+ * The hardware QoS function is supported by PPE, which is based on
-+ * PPE hardware queue scheduler of PPE port.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
-+ int node_id, bool flow_level, int port,
-+ struct ppe_qos_scheduler_cfg scheduler_cfg)
-+{
-+ if (flow_level)
-+ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
-+ port, scheduler_cfg);
-+
-+ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
-+ port, scheduler_cfg);
-+}
-+
-+/**
-+ * ppe_queue_scheduler_get - get QoS scheduler of PPE hardware queue
-+ * @ppe_dev: PPE device
-+ * @node_id: PPE node ID
-+ * @flow_level: Flow level scheduler or queue level scheduler
-+ * @port: PPE port ID to get scheduler config
-+ * @scheduler_cfg: QoS scheduler configuration
-+ *
-+ * The hardware QoS function is supported by PPE, the current scheduler
-+ * configuration can be acquired based on the queue ID of PPE port.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
-+ int node_id, bool flow_level, int *port,
-+ struct ppe_qos_scheduler_cfg *scheduler_cfg)
-+{
-+ if (flow_level)
-+ return ppe_scheduler_l1_queue_map_get(ppe_dev, node_id,
-+ port, scheduler_cfg);
-+
-+ return ppe_scheduler_l0_queue_map_get(ppe_dev, node_id,
-+ port, scheduler_cfg);
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -343,6 +1099,126 @@ qm_config_fail:
- return ret;
- }
-
-+static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
-+ struct ppe_port_schedule_config config)
-+{
-+ struct ppe_qos_scheduler_cfg qos_cfg;
-+ int ret, i;
-+
-+ for (i = 0; i < config.loop_num; i++) {
-+ if (!config.pri_max) {
-+ /* Round robin scheduler without priority. */
-+ qos_cfg.flow_id = config.flow_id;
-+ qos_cfg.pri = 0;
-+ qos_cfg.drr_node_id = config.drr_node_id;
-+ } else {
-+ qos_cfg.flow_id = config.flow_id + (i / config.pri_max);
-+ qos_cfg.pri = i % config.pri_max;
-+ qos_cfg.drr_node_id = config.drr_node_id + i;
-+ }
-+
-+ /* Scheduler weight, must be more than 0. */
-+ qos_cfg.drr_node_wt = 1;
-+ /* Byte based to schedule. */
-+ qos_cfg.node_unit = 0;
-+ /* Frame + CRC calculated. */
-+ qos_cfg.node_frame_mode = 1;
-+
-+ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
-+ config.flow_level,
-+ config.port,
-+ qos_cfg);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "PPE scheduler config error %d\n", ret);
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* Configure PPE offloaded QoS scheduler. */
-+static int ppe_config_qos(struct ppe_device *ppe_dev)
-+{
-+ int ret, i;
-+
-+ for (i = 0; i < ARRAY_SIZE(ppe_qos_schedule_config); i++) {
-+ if (ppe_qos_schedule_config[i].port >= ppe_dev->num_ports)
-+ break;
-+
-+ ret = ppe_node_scheduler_config(ppe_dev, ppe_qos_schedule_config[i]);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Configure scheduling management of PPE ports. */
-+static int ppe_config_scheduler(struct ppe_device *ppe_dev)
-+{
-+ struct ppe_sch_schedule_config *schedule_cfg;
-+ int ret, i, bm_count, schedule_count;
-+ struct ppe_sch_bm_config *bm_cfg;
-+ u32 val, reg;
-+
-+ bm_cfg = ipq9574_ppe_sch_bm_config;
-+ bm_count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
-+
-+ schedule_cfg = ipq9574_ppe_sch_schedule_config;
-+ schedule_count = ARRAY_SIZE(ipq9574_ppe_sch_schedule_config);
-+
-+ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, bm_count);
-+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
-+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
-+
-+ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
-+ if (ret)
-+ goto sch_config_fail;
-+
-+ for (i = 0; i < bm_count; i++) {
-+ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
-+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].is_egress);
-+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
-+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID, bm_cfg[i].second_valid);
-+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT, bm_cfg[i].second_port);
-+
-+ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ goto sch_config_fail;
-+ }
-+
-+ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, schedule_count);
-+ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
-+ if (ret)
-+ goto sch_config_fail;
-+
-+ for (i = 0; i < schedule_count; i++) {
-+ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
-+ schedule_cfg[i].ensch_port_bmp);
-+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
-+ schedule_cfg[i].ensch_port);
-+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
-+ schedule_cfg[i].desch_port);
-+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
-+ schedule_cfg[i].desch_second_valid);
-+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
-+ schedule_cfg[i].desch_second_port);
-+ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ goto sch_config_fail;
-+ }
-+
-+ return ppe_config_qos(ppe_dev);
-+
-+sch_config_fail:
-+ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
-+ return ret;
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev)
- {
- int ret;
-@@ -351,5 +1227,9 @@ int ppe_hw_config(struct ppe_device *ppe
- if (ret)
- return ret;
-
-- return ppe_config_qm(ppe_dev);
-+ ret = ppe_config_qm(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_config_scheduler(ppe_dev);
- }
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -6,5 +6,31 @@
- #ifndef __PPE_CONFIG_H__
- #define __PPE_CONFIG_H__
-
-+/**
-+ * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
-+ * @flow_id: PPE flow ID.
-+ * @pri: Scheduler priority.
-+ * @drr_node_id: Node ID for scheduled traffic.
-+ * @drr_node_wt: weight for scheduled traffic.
-+ * @node_unit : Unit for scheduled traffic.
-+ * @node_frame_mode: Packet mode to be scheduled.
-+ *
-+ * PPE QoS feature supports the commit and exceed traffic.
-+ */
-+struct ppe_qos_scheduler_cfg {
-+ int flow_id;
-+ int pri;
-+ int drr_node_id;
-+ int drr_node_wt;
-+ int node_unit;
-+ int node_frame_mode;
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev);
-+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
-+ int node_id, bool flow_level, int port,
-+ struct ppe_qos_scheduler_cfg scheduler_cfg);
-+int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
-+ int node_id, bool flow_level, int *port,
-+ struct ppe_qos_scheduler_cfg *scheduler_cfg);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -11,14 +11,108 @@
- * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
- * to PPE physical port 1-6, BM port 14 is matched to EIP.
- */
-+#define PPE_BM_SCH_CTRL_ADDR 0xb000
-+#define PPE_BM_SCH_CTRL_NUM 1
-+#define PPE_BM_SCH_CTRL_INC 4
-+#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
-+#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
-+#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
-+
-+#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
-+#define PPE_BM_SCH_CFG_TBL_NUM 128
-+#define PPE_BM_SCH_CFG_TBL_INC 0x10
-+#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
-+#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
-+#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
-+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
-+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
-+
- #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
- #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
-
-+#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
-+#define PPE_PSCH_SCH_DEPTH_CFG_NUM 1
-+#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
-+#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
-+
-+#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
-+#define PPE_L0_FLOW_MAP_TBL_NUM 300
-+#define PPE_L0_FLOW_MAP_TBL_INC 0x10
-+#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
-+#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
-+#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
-+#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
-+#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
-+
-+#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
-+#define PPE_L0_C_FLOW_CFG_TBL_NUM 512
-+#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
-+#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
-+#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
-+
-+#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
-+#define PPE_L0_E_FLOW_CFG_TBL_NUM 512
-+#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
-+#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
-+#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
-+
-+#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
-+#define PPE_L0_FLOW_PORT_MAP_TBL_NUM 300
-+#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
-+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
-+
-+#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
-+#define PPE_L0_COMP_CFG_TBL_NUM 300
-+#define PPE_L0_COMP_CFG_TBL_INC 0x10
-+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
-+#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
-+
- #define PPE_DEQ_OPR_TBL_ADDR 0x430000
- #define PPE_DEQ_OPR_TBL_NUM 300
- #define PPE_DEQ_OPR_TBL_INC 0x10
- #define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
-
-+#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
-+#define PPE_L1_FLOW_MAP_TBL_NUM 64
-+#define PPE_L1_FLOW_MAP_TBL_INC 0x10
-+#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
-+#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
-+#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
-+#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
-+#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
-+
-+#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
-+#define PPE_L1_C_FLOW_CFG_TBL_NUM 64
-+#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
-+#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
-+#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
-+
-+#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
-+#define PPE_L1_E_FLOW_CFG_TBL_NUM 64
-+#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
-+#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
-+#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
-+
-+#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
-+#define PPE_L1_FLOW_PORT_MAP_TBL_NUM 64
-+#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
-+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
-+
-+#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
-+#define PPE_L1_COMP_CFG_TBL_NUM 64
-+#define PPE_L1_COMP_CFG_TBL_INC 0x10
-+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
-+#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
-+
-+#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
-+#define PPE_PSCH_SCH_CFG_TBL_NUM 128
-+#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
-+#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
-+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
-+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
-+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
-+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
-+
- #define PPE_BM_PORT_FC_MODE_ADDR 0x600100
- #define PPE_BM_PORT_FC_MODE_INC 0x4
- #define PPE_BM_PORT_FC_MODE_EN BIT(0)
+++ /dev/null
-From 8dd72bdbb1e3f0061f2e4a9bb4f6fce0966585a6 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 13:13:46 +0800
-Subject: [PATCH 22/50] net: ethernet: qualcomm: Initialize PPE queue settings
-
-Configure unicast and multicast hardware queues to forward
-the traffic between PPE ports.
-
-Each PPE port is assigned with the specific queue resource,
-and the egress queue ID is decided by the priority and the
-RSS hash value of packet.
-
-Change-Id: I3e4d4e12548a12b11f129106678375cc3b58828d
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_api.c | 44 +++
- drivers/net/ethernet/qualcomm/ppe/ppe_api.h | 34 ++
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 362 +++++++++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 41 ++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 16 +
- 5 files changed, 496 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
-@@ -38,3 +38,47 @@ int ppe_queue_priority_set(struct ppe_de
-
- return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
- }
-+
-+/**
-+ * ppe_edma_queue_offset_config - Configure queue offset for EDMA interface
-+ * @ppe_dev: PPE device
-+ * @class: The class to configure queue offset
-+ * @index: Class index, internal priority or hash value
-+ * @queue_offset: Queue offset value
-+ *
-+ * PPE EDMA queue offset is configured based on the PPE internal priority or
-+ * RSS hash value, the profile ID is fixed to 0 for EDMA interface.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
-+ enum ppe_queue_class_type class,
-+ int index, int queue_offset)
-+{
-+ if (class == PPE_QUEUE_CLASS_PRIORITY)
-+ return ppe_queue_ucast_pri_class_set(ppe_dev, 0,
-+ index, queue_offset);
-+
-+ return ppe_queue_ucast_hash_class_set(ppe_dev, 0,
-+ index, queue_offset);
-+}
-+
-+/**
-+ * ppe_edma_queue_resource_get - Get EDMA queue resource
-+ * @ppe_dev: PPE device
-+ * @type: Resource type
-+ * @res_start: Resource start ID returned
-+ * @res_end: Resource end ID returned
-+ *
-+ * PPE EDMA queue resource includes unicast queue and multicast queue.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
-+ int *res_start, int *res_end)
-+{
-+ if (type != PPE_RES_UCAST && type != PPE_RES_MCAST)
-+ return -EINVAL;
-+
-+ return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
-+};
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-@@ -15,7 +15,41 @@
- #define PPE_QUEUE_ID_NUM 300
- #define PPE_FLOW_ID_NUM 64
- #define PPE_QUEUE_SCH_PRI_NUM 8
-+#define PPE_QUEUE_INTER_PRI_NUM 16
-+#define PPE_QUEUE_HASH_NUM 256
-+
-+/**
-+ * enum ppe_queue_class_type - PPE queue class type
-+ * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
-+ * @PPE_QUEUE_CLASS_HASH: Queue offset configured from RSS hash.
-+ */
-+enum ppe_queue_class_type {
-+ PPE_QUEUE_CLASS_PRIORITY,
-+ PPE_QUEUE_CLASS_HASH,
-+};
-+
-+/**
-+ * enum ppe_resource_type - PPE resource type
-+ * @PPE_RES_UCAST: Unicast queue resource
-+ * @PPE_RES_MCAST: Multicast queue resource
-+ * @PPE_RES_FLOW_ID: Flow resource
-+ * @PPE_RES_L0_NODE: Level 0 QoS node resource
-+ * @PPE_RES_L1_NODE: Level 1 QoS node resource
-+ */
-+enum ppe_resource_type {
-+ PPE_RES_UCAST,
-+ PPE_RES_MCAST,
-+ PPE_RES_FLOW_ID,
-+ PPE_RES_L0_NODE,
-+ PPE_RES_L1_NODE,
-+};
-
- int ppe_queue_priority_set(struct ppe_device *ppe_dev,
- int queue_id, int priority);
-+
-+int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
-+ enum ppe_queue_class_type class,
-+ int index, int queue_offset);
-+int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
-+ int *res_start, int *res_end);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -119,6 +119,34 @@ struct ppe_port_schedule_config {
- unsigned int drr_node_id;
- };
-
-+/**
-+ * struct ppe_port_schedule_resource - PPE port scheduler resource.
-+ * @ucastq_start: Unicast queue start ID.
-+ * @ucastq_end: Unicast queue end ID.
-+ * @mcastq_start: Multicast queue start ID.
-+ * @mcastq_end: Multicast queue end ID.
-+ * @flow_id_start: Flow start ID.
-+ * @flow_id_end: Flow end ID.
-+ * @l0node_start: Scheduler node start ID for queue level.
-+ * @l0node_end: Scheduler node end ID for queue level.
-+ * @l1node_start: Scheduler node start ID for flow level.
-+ * @l1node_end: Scheduler node end ID for flow level.
-+ *
-+ * PPE scheduler resource allocated among the PPE ports.
-+ */
-+struct ppe_port_schedule_resource {
-+ unsigned int ucastq_start;
-+ unsigned int ucastq_end;
-+ unsigned int mcastq_start;
-+ unsigned int mcastq_end;
-+ unsigned int flow_id_start;
-+ unsigned int flow_id_end;
-+ unsigned int l0node_start;
-+ unsigned int l0node_end;
-+ unsigned int l1node_start;
-+ unsigned int l1node_end;
-+};
-+
- static int ipq9574_ppe_bm_group_config = 1550;
- static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
- {
-@@ -648,6 +676,111 @@ static struct ppe_port_schedule_config p
- },
- };
-
-+/* The QoS resource is applied to each PPE port, there are some
-+ * resource reserved as the last one.
-+ */
-+static struct ppe_port_schedule_resource ppe_scheduler_res[] = {
-+ { .ucastq_start = 0,
-+ .ucastq_end = 63,
-+ .mcastq_start = 256,
-+ .ucastq_end = 271,
-+ .flow_id_start = 0,
-+ .flow_id_end = 0,
-+ .l0node_start = 0,
-+ .l0node_end = 7,
-+ .l1node_start = 0,
-+ .l1node_end = 0,
-+ },
-+ { .ucastq_start = 144,
-+ .ucastq_end = 159,
-+ .mcastq_start = 272,
-+ .ucastq_end = 275,
-+ .flow_id_start = 36,
-+ .flow_id_end = 39,
-+ .l0node_start = 48,
-+ .l0node_end = 63,
-+ .l1node_start = 8,
-+ .l1node_end = 11,
-+ },
-+ { .ucastq_start = 160,
-+ .ucastq_end = 175,
-+ .mcastq_start = 276,
-+ .ucastq_end = 279,
-+ .flow_id_start = 40,
-+ .flow_id_end = 43,
-+ .l0node_start = 64,
-+ .l0node_end = 79,
-+ .l1node_start = 12,
-+ .l1node_end = 15,
-+ },
-+ { .ucastq_start = 176,
-+ .ucastq_end = 191,
-+ .mcastq_start = 280,
-+ .ucastq_end = 283,
-+ .flow_id_start = 44,
-+ .flow_id_end = 47,
-+ .l0node_start = 80,
-+ .l0node_end = 95,
-+ .l1node_start = 16,
-+ .l1node_end = 19,
-+ },
-+ { .ucastq_start = 192,
-+ .ucastq_end = 207,
-+ .mcastq_start = 284,
-+ .ucastq_end = 287,
-+ .flow_id_start = 48,
-+ .flow_id_end = 51,
-+ .l0node_start = 96,
-+ .l0node_end = 111,
-+ .l1node_start = 20,
-+ .l1node_end = 23,
-+ },
-+ { .ucastq_start = 208,
-+ .ucastq_end = 223,
-+ .mcastq_start = 288,
-+ .ucastq_end = 291,
-+ .flow_id_start = 52,
-+ .flow_id_end = 55,
-+ .l0node_start = 112,
-+ .l0node_end = 127,
-+ .l1node_start = 24,
-+ .l1node_end = 27,
-+ },
-+ { .ucastq_start = 224,
-+ .ucastq_end = 239,
-+ .mcastq_start = 292,
-+ .ucastq_end = 295,
-+ .flow_id_start = 56,
-+ .flow_id_end = 59,
-+ .l0node_start = 128,
-+ .l0node_end = 143,
-+ .l1node_start = 28,
-+ .l1node_end = 31,
-+ },
-+ { .ucastq_start = 240,
-+ .ucastq_end = 255,
-+ .mcastq_start = 296,
-+ .ucastq_end = 299,
-+ .flow_id_start = 60,
-+ .flow_id_end = 63,
-+ .l0node_start = 144,
-+ .l0node_end = 159,
-+ .l1node_start = 32,
-+ .l1node_end = 35,
-+ },
-+ { .ucastq_start = 64,
-+ .ucastq_end = 143,
-+ .mcastq_start = 0,
-+ .ucastq_end = 0,
-+ .flow_id_start = 1,
-+ .flow_id_end = 35,
-+ .l0node_start = 8,
-+ .l0node_end = 47,
-+ .l1node_start = 1,
-+ .l1node_end = 7,
-+ },
-+};
-+
- /* Set the first level scheduler configuration. */
- static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
- int node_id, int port,
-@@ -893,6 +1026,147 @@ int ppe_queue_scheduler_get(struct ppe_d
- port, scheduler_cfg);
- }
-
-+/**
-+ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID
-+ * @ppe_dev: PPE device
-+ * @queue_dst: PPE queue destination configuration
-+ * @queue_base: PPE queue base ID
-+ * @profile_id: Profile ID
-+ *
-+ * The PPE unicast queue base ID is configured based on the destination
-+ * port information per profile ID.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
-+ struct ppe_queue_ucast_dest queue_dst,
-+ int queue_base, int profile_id)
-+{
-+ int index, profile_size;
-+ u32 val, reg;
-+
-+ profile_size = queue_dst.src_profile << 8;
-+ if (queue_dst.service_code_en)
-+ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
-+ queue_dst.service_code;
-+ else if (queue_dst.cpu_code_en)
-+ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
-+ queue_dst.cpu_code;
-+ else
-+ index = profile_size + queue_dst.dest_port;
-+
-+ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
-+ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
-+ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
-+/**
-+ * ppe_queue_ucast_pri_class_set - Set PPE unicast queue class of priority
-+ * @ppe_dev: PPE device
-+ * @profile_id: Profile ID
-+ * @priority: Priority to be used to set class
-+ * @class_offset: Class value for the destination queue ID
-+ *
-+ * The PPE unicast queue class is configured based on the PPE
-+ * internal priority.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
-+ int profile_id,
-+ int priority,
-+ int class_offset)
-+{
-+ u32 val, reg;
-+ int index;
-+
-+ index = (profile_id << 4) + priority;
-+ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, class_offset);
-+ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
-+/**
-+ * ppe_queue_ucast_hash_class_set - Set PPE unicast queue class of hash value
-+ * @ppe_dev: PPE device
-+ * @profile_id: Profile ID
-+ * @rss_hash: Hash value to be used to set clas
-+ * @class_offset: Class value for the destination queue ID
-+ *
-+ * The PPE unicast queue class is configured based on the RSS hash value.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
-+ int profile_id,
-+ int rss_hash,
-+ int class_offset)
-+{
-+ u32 val, reg;
-+ int index;
-+
-+ index = (profile_id << 8) + rss_hash;
-+ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, class_offset);
-+ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
-+/**
-+ * ppe_port_resource_get - Get PPE resource per port
-+ * @ppe_dev: PPE device
-+ * @port: PPE port
-+ * @type: Resource type
-+ * @res_start: Resource start ID
-+ * @res_end: Resource end ID
-+ *
-+ * PPE resource is assigned per PPE port, which is acquired for QoS scheduler.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
-+ int *res_start, int *res_end)
-+{
-+ struct ppe_port_schedule_resource res;
-+
-+ /* The reserved resource with the maximum port ID of PPE is
-+ * also allowed to be acquired.
-+ */
-+ if (port > ppe_dev->num_ports)
-+ return -EINVAL;
-+
-+ res = ppe_scheduler_res[port];
-+ switch (type) {
-+ case PPE_RES_UCAST:
-+ *res_start = res.ucastq_start;
-+ *res_end = res.ucastq_end;
-+ break;
-+ case PPE_RES_MCAST:
-+ *res_start = res.mcastq_start;
-+ *res_end = res.mcastq_end;
-+ break;
-+ case PPE_RES_FLOW_ID:
-+ *res_start = res.flow_id_start;
-+ *res_end = res.flow_id_end;
-+ break;
-+ case PPE_RES_L0_NODE:
-+ *res_start = res.l0node_start;
-+ *res_end = res.l0node_end;
-+ break;
-+ case PPE_RES_L1_NODE:
-+ *res_start = res.l1node_start;
-+ *res_end = res.l1node_end;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -1219,6 +1493,88 @@ sch_config_fail:
- return ret;
- };
-
-+/* Configure PPE queue destination of each PPE port. */
-+static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
-+{
-+ int ret, port_id, index, class, res_start, res_end, queue_base, pri_max;
-+ struct ppe_queue_ucast_dest queue_dst;
-+
-+ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
-+ memset(&queue_dst, 0, sizeof(queue_dst));
-+
-+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
-+ &res_start, &res_end);
-+ if (ret)
-+ return ret;
-+
-+ queue_base = res_start;
-+ queue_dst.dest_port = port_id;
-+
-+ /* Configure queue base ID and profile ID that is same as
-+ * physical port ID.
-+ */
-+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
-+ queue_base, port_id);
-+ if (ret)
-+ return ret;
-+
-+ /* Queue priority range supported by each PPE port */
-+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
-+ &res_start, &res_end);
-+ if (ret)
-+ return ret;
-+
-+ pri_max = res_end - res_start;
-+
-+ /* Redirect ARP reply packet with the max priority on CPU port,
-+ * which keeps the ARP reply directed to CPU (CPU code is 101)
-+ * with highest priority received by EDMA when there is a heavy
-+ * traffic loaded.
-+ */
-+ if (port_id == 0) {
-+ memset(&queue_dst, 0, sizeof(queue_dst));
-+
-+ queue_dst.cpu_code_en = true;
-+ queue_dst.cpu_code = 101;
-+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
-+ queue_base + pri_max,
-+ 0);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Initialize the class offset of internal priority. */
-+ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
-+ class = index > pri_max ? pri_max : index;
-+
-+ ret = ppe_queue_ucast_pri_class_set(ppe_dev, port_id,
-+ index, class);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Initialize the class offset of RSS hash as 0 to avoid the
-+ * random hardware value that will lead to the unexpected
-+ * destination queue generated.
-+ */
-+ index = 0;
-+ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
-+ ret = ppe_queue_ucast_hash_class_set(ppe_dev, port_id,
-+ index, 0);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* Initialize PPE device to handle traffic correctly. */
-+static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
-+{
-+ return ppe_queue_dest_init(ppe_dev);
-+}
-+
- int ppe_hw_config(struct ppe_device *ppe_dev)
- {
- int ret;
-@@ -1231,5 +1587,9 @@ int ppe_hw_config(struct ppe_device *ppe
- if (ret)
- return ret;
-
-- return ppe_config_scheduler(ppe_dev);
-+ ret = ppe_config_scheduler(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_dev_hw_init(ppe_dev);
- }
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -6,6 +6,13 @@
- #ifndef __PPE_CONFIG_H__
- #define __PPE_CONFIG_H__
-
-+/* There are different queue config ranges for the destination port,
-+ * CPU code and service code.
-+ */
-+#define PPE_QUEUE_BASE_DEST_PORT 0
-+#define PPE_QUEUE_BASE_CPU_CODE 1024
-+#define PPE_QUEUE_BASE_SERVICE_CODE 2048
-+
- /**
- * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
- * @flow_id: PPE flow ID.
-@@ -26,6 +33,26 @@ struct ppe_qos_scheduler_cfg {
- int node_frame_mode;
- };
-
-+/**
-+ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
-+ * @src_profile: Source profile.
-+ * @service_code_en: Enable service code.
-+ * @service_code: Service code.
-+ * @cpu_code_en: Enable CPU code.
-+ * @cpu_code: CPU code.
-+ * @dest_port: destination port.
-+ *
-+ * PPE egress queue ID is decided by the egress port ID.
-+ */
-+struct ppe_queue_ucast_dest {
-+ int src_profile;
-+ bool service_code_en;
-+ int service_code;
-+ bool cpu_code_en;
-+ int cpu_code;
-+ int dest_port;
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev);
- int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
- int node_id, bool flow_level, int port,
-@@ -33,4 +60,18 @@ int ppe_queue_scheduler_set(struct ppe_d
- int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
- int node_id, bool flow_level, int *port,
- struct ppe_qos_scheduler_cfg *scheduler_cfg);
-+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
-+ struct ppe_queue_ucast_dest queue_dst,
-+ int queue_base,
-+ int profile_id);
-+int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
-+ int profile_id,
-+ int priority,
-+ int class_offset);
-+int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
-+ int profile_id,
-+ int rss_hash,
-+ int class_offset);
-+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
-+ int *res_start, int *res_end);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -153,6 +153,22 @@
- #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
-
-+#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
-+#define PPE_UCAST_QUEUE_MAP_TBL_NUM 3072
-+#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
-+#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
-+#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
-+
-+#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
-+#define PPE_UCAST_HASH_MAP_TBL_NUM 4096
-+#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
-+#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
-+
-+#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
-+#define PPE_UCAST_PRIORITY_MAP_TBL_NUM 256
-+#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
-+#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
-+
- /* PPE unicast queue (0-255) configurations. */
- #define PPE_AC_UNI_QUEUE_CFG_TBL_ADDR 0x848000
- #define PPE_AC_UNI_QUEUE_CFG_TBL_NUM 256
+++ /dev/null
-From 278b9f94b1dd344e88739044dd20d407b7f0651f Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 13:51:20 +0800
-Subject: [PATCH 23/50] net: ethernet: qualcomm: Add PPE service code config
-
-Configure service code for marking the traffic passed through
-PPE. Service code is generated according the features of packet
-when the packet is processed by PPE.
-
-The bypass features of service code 1 is configured by default,
-which used by CPU port when the packet is transmitted from host
-to the CPU port of PPE.
-
-Change-Id: I9fd2d26ba4c40e9ca182c20f5e02bd2f6f3e5e05
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_api.h | 3 +
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 98 +++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 142 ++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 48 ++++++
- 4 files changed, 290 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-@@ -18,6 +18,9 @@
- #define PPE_QUEUE_INTER_PRI_NUM 16
- #define PPE_QUEUE_HASH_NUM 256
-
-+/* The service code is used by EDMA driver to transmit packet to PPE. */
-+#define PPE_EDMA_SC_BYPASS_ID 1
-+
- /**
- * enum ppe_queue_class_type - PPE queue class type
- * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -8,6 +8,7 @@
- */
-
- #include <linux/bitfield.h>
-+#include <linux/bitmap.h>
- #include <linux/bits.h>
- #include <linux/device.h>
- #include <linux/regmap.h>
-@@ -1167,6 +1168,76 @@ int ppe_port_resource_get(struct ppe_dev
- return 0;
- }
-
-+/**
-+ * ppe_servcode_config_set - Set PPE service code configuration
-+ * @ppe_dev: PPE device
-+ * @servcode: Service ID, 0-255 supported by PPE
-+ * @cfg: Service code configuration
-+ *
-+ * The service code configuration of PPE is used to handle the PPE
-+ * functions.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_servcode_config_set(struct ppe_device *ppe_dev, int servcode,
-+ struct ppe_servcode_cfg cfg)
-+{
-+ u32 val, reg, servcode_val[2] = {};
-+ unsigned long bitmap_value;
-+ int ret;
-+
-+ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
-+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
-+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
-+
-+ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
-+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
-+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
-+ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
-+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
-+ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
-+ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * servcode;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+
-+ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
-+ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
-+ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
-+ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
-+ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * servcode;
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ servcode_val, ARRAY_SIZE(servcode_val));
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * servcode;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ servcode_val, ARRAY_SIZE(servcode_val));
-+ if (ret)
-+ return ret;
-+
-+ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.field_update_bitmap);
-+ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
-+ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.hw_service);
-+ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.offset_sel);
-+ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
-+ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ servcode_val, ARRAY_SIZE(servcode_val));
-+ if (ret)
-+ return ret;
-+
-+ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
-+ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
-+ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * servcode;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -1569,10 +1640,35 @@ static int ppe_queue_dest_init(struct pp
- return 0;
- }
-
-+/* Initialize the service code 1 used by CPU port. */
-+static int ppe_servcode_init(struct ppe_device *ppe_dev)
-+{
-+ struct ppe_servcode_cfg servcode_cfg = {};
-+
-+ bitmap_zero(servcode_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
-+ bitmap_zero(servcode_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
-+
-+ bitmap_fill(servcode_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
-+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, servcode_cfg.bitmaps.ingress);
-+ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, servcode_cfg.bitmaps.ingress);
-+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, servcode_cfg.bitmaps.ingress);
-+
-+ bitmap_fill(servcode_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
-+ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, servcode_cfg.bitmaps.egress);
-+
-+ return ppe_servcode_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, servcode_cfg);
-+}
-+
- /* Initialize PPE device to handle traffic correctly. */
- static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
- {
-- return ppe_queue_dest_init(ppe_dev);
-+ int ret;
-+
-+ ret = ppe_queue_dest_init(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_servcode_init(ppe_dev);
- }
-
- int ppe_hw_config(struct ppe_device *ppe_dev)
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -6,6 +6,8 @@
- #ifndef __PPE_CONFIG_H__
- #define __PPE_CONFIG_H__
-
-+#include <linux/types.h>
-+
- /* There are different queue config ranges for the destination port,
- * CPU code and service code.
- */
-@@ -53,6 +55,143 @@ struct ppe_queue_ucast_dest {
- int dest_port;
- };
-
-+/* Hardware bitmaps for bypassing features of the ingress packet. */
-+enum ppe_sc_ingress_type {
-+ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
-+ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
-+ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
-+ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
-+ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
-+ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
-+ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
-+ PPE_SC_BYPASS_INGRESS_ACL = 7,
-+ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
-+ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
-+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
-+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
-+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
-+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
-+ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
-+ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
-+ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
-+ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
-+ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
-+ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
-+ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
-+ /* Values 21-23 are not specified by hardware. */
-+ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
-+ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
-+ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
-+ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
-+ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
-+ /* This must be last as it determines the size of the BITMAP. */
-+ PPE_SC_BYPASS_INGRESS_SIZE,
-+};
-+
-+/* Hardware bitmaps for bypassing features of the egress packet. */
-+enum ppe_sc_egress_type {
-+ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
-+ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
-+ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
-+ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
-+ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
-+ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
-+ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
-+ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
-+ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
-+ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
-+ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
-+ PPE_SC_BYPASS_EGRESS_POLICER = 11,
-+ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
-+ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
-+ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
-+ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
-+ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
-+ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
-+ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
-+ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
-+ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
-+ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
-+ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
-+ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
-+ /* This must be last as it determines the size of the BITMAP. */
-+ PPE_SC_BYPASS_EGRESS_SIZE,
-+};
-+
-+/* Hardware bitmaps for bypassing counter of packet. */
-+enum ppe_sc_counter_type {
-+ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
-+ PPE_SC_BYPASS_COUNTER_RX = 1,
-+ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
-+ PPE_SC_BYPASS_COUNTER_TX = 3,
-+ /* This must be last as it determines the size of the BITMAP. */
-+ PPE_SC_BYPASS_COUNTER_SIZE,
-+};
-+
-+/* Hardware bitmaps for bypassing features of tunnel packet. */
-+enum ppe_sc_tunnel_type {
-+ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
-+ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
-+ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
-+ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
-+ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
-+ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
-+ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
-+ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
-+ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
-+ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
-+ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
-+ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
-+ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
-+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
-+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
-+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
-+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
-+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
-+ /* Values 18-19 are not specified by hardware. */
-+ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
-+ /* This must be last as it determines the size of the BITMAP. */
-+ PPE_SC_BYPASS_TUNNEL_SIZE,
-+};
-+
-+/**
-+ * struct ppe_sc_bypss - PPE service bypass bitmaps
-+ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
-+ * @egress: Bitmap of features that can be bypassed on the egress packet.
-+ * @counter: Bitmap of features that can be bypassed on the counter type.
-+ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
-+ */
-+struct ppe_sc_bypass {
-+ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
-+ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
-+ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
-+ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
-+};
-+
-+/**
-+ * struct ppe_servcode_cfg - PPE service code configuration.
-+ * @dest_port_valid: Generate destination port or not.
-+ * @dest_port: Destination port ID.
-+ * @bitmaps: Bitmap of bypass features.
-+ * @is_src: Destination port acts as source port, packet sent to CPU.
-+ * @field_update_bitmap: Fields updated to the EDMA preheader.
-+ * @next_service_code: New service code.
-+ * @hw_service: Hardware functions selected.
-+ * @offset_sel: Packet offset selection.
-+ *
-+ * Service code is generated during the packet passing through PPE.
-+ */
-+struct ppe_servcode_cfg {
-+ bool dest_port_valid;
-+ int dest_port;
-+ struct ppe_sc_bypass bitmaps;
-+ bool is_src;
-+ int field_update_bitmap;
-+ int next_service_code;
-+ int hw_service;
-+ int offset_sel;
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev);
- int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
- int node_id, bool flow_level, int port,
-@@ -74,4 +213,7 @@ int ppe_queue_ucast_hash_class_set(struc
- int class_offset);
- int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
- int *res_start, int *res_end);
-+int ppe_servcode_config_set(struct ppe_device *ppe_dev,
-+ int servcode,
-+ struct ppe_servcode_cfg cfg);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -27,9 +27,57 @@
- #define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
- #define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
-
-+/* PPE service code configuration on the ingress direction. */
-+#define PPE_SERVICE_TBL_ADDR 0x15000
-+#define PPE_SERVICE_TBL_NUM 256
-+#define PPE_SERVICE_TBL_INC 0x10
-+#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
-+#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
-+
-+#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_SERVICE_W0_BYPASS_BITMAP)
-+#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
-+
- #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
- #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
-
-+/* PPE service code configuration on the egress direction. */
-+#define PPE_EG_SERVICE_TBL_ADDR 0x43000
-+#define PPE_EG_SERVICE_TBL_NUM 256
-+#define PPE_EG_SERVICE_TBL_INC 0x10
-+#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
-+#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
-+#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
-+#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
-+#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
-+
-+#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_EG_SERVICE_W0_UPDATE_ACTION)
-+#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_NEXT_SERVCODE)
-+#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_HW_SERVICE)
-+#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_OFFSET_SEL)
-+#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
-+
-+#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
-+#define PPE_IN_L2_SERVICE_TBL_NUM 256
-+#define PPE_IN_L2_SERVICE_TBL_INC 0x10
-+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
-+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
-+#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
-+#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
-+#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
-+#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
-+
-+#define PPE_TL_SERVICE_TBL_ADDR 0x306000
-+#define PPE_TL_SERVICE_TBL_NUM 256
-+#define PPE_TL_SERVICE_TBL_INC 4
-+#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
-+
- #define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
- #define PPE_PSCH_SCH_DEPTH_CFG_NUM 1
- #define PPE_PSCH_SCH_DEPTH_CFG_INC 4
+++ /dev/null
-From 61881bae3ad9d961139e970f1aae75070cd45b5c Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 14:11:40 +0800
-Subject: [PATCH 24/50] net: ethernet: qualcomm: Add PPE port control config
-
-1. Initialize and setup the physical port.
-2. Configure the default action as drop when the packet size
- is more than the configured MTU of physical port.
-
-Change-Id: Id98aea7b17556f85021905978b3403ca6d427557
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 91 ++++++++++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 11 +++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 50 ++++++++++
- 3 files changed, 151 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -1238,6 +1238,50 @@ int ppe_servcode_config_set(struct ppe_d
- return regmap_write(ppe_dev->regmap, reg, val);
- }
-
-+/**
-+ * ppe_counter_set - Set PPE port counter enabled or not
-+ * @ppe_dev: PPE device
-+ * @port: PPE port ID
-+ * @enable: Counter status
-+ *
-+ * PPE port counter is optionally configured as enabled or not.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable)
-+{
-+ u32 reg, val, mru_mtu_val[3];
-+ int ret;
-+
-+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+ if (ret)
-+ return ret;
-+
-+ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, enable);
-+ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, enable);
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
-+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_TX_CNT_EN, enable);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_MC_MTU_CTRL_TBL_TX_CNT_EN,
-+ val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_PORT_EG_VLAN_ADDR + PPE_PORT_EG_VLAN_INC * port;
-+ val = FIELD_PREP(PPE_PORT_EG_VLAN_TX_COUNTING_EN, enable);
-+
-+ return regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_PORT_EG_VLAN_TX_COUNTING_EN,
-+ val);
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -1659,6 +1703,47 @@ static int ppe_servcode_init(struct ppe_
- return ppe_servcode_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, servcode_cfg);
- }
-
-+/* Initialize PPE port configurations. */
-+static int ppe_port_ctrl_init(struct ppe_device *ppe_dev)
-+{
-+ u32 reg, val, mru_mtu_val[3];
-+ int i, ret;
-+
-+ for (i = 1; i < ppe_dev->num_ports; i++) {
-+ /* Enable PPE port counter */
-+ ret = ppe_counter_set(ppe_dev, i, true);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+ if (ret)
-+ return ret;
-+
-+ /* Drop the packet when the packet size is more than
-+ * the MTU or MRU of the physical PPE port.
-+ */
-+ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_DROP);
-+ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
-+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
-+ val);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Enable CPU port counter. */
-+ return ppe_counter_set(ppe_dev, 0, true);
-+}
-+
- /* Initialize PPE device to handle traffic correctly. */
- static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
- {
-@@ -1668,7 +1753,11 @@ static int ppe_dev_hw_init(struct ppe_de
- if (ret)
- return ret;
-
-- return ppe_servcode_init(ppe_dev);
-+ ret = ppe_servcode_init(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_port_ctrl_init(ppe_dev);
- }
-
- int ppe_hw_config(struct ppe_device *ppe_dev)
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -192,6 +192,16 @@ struct ppe_servcode_cfg {
- int offset_sel;
- };
-
-+/* The action of packet received by PPE can be forwarded, dropped, copied
-+ * to CPU (enter multicast queue), redirected to CPU (enter unicast queue).
-+ */
-+enum ppe_action_type {
-+ PPE_ACTION_FORWARD = 0,
-+ PPE_ACTION_DROP = 1,
-+ PPE_ACTION_COPY_TO_CPU = 2,
-+ PPE_ACTION_REDIRECT_TO_CPU = 3,
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev);
- int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
- int node_id, bool flow_level, int port,
-@@ -216,4 +226,5 @@ int ppe_port_resource_get(struct ppe_dev
- int ppe_servcode_config_set(struct ppe_device *ppe_dev,
- int servcode,
- struct ppe_servcode_cfg cfg);
-+int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -18,6 +18,11 @@
- #define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
- #define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
-
-+#define PPE_RX_FIFO_CFG_ADDR 0xb004
-+#define PPE_RX_FIFO_CFG_NUM 8
-+#define PPE_RX_FIFO_CFG_INC 4
-+#define PPE_RX_FIFO_CFG_THRSH GENMASK(2, 0)
-+
- #define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
- #define PPE_BM_SCH_CFG_TBL_NUM 128
- #define PPE_BM_SCH_CFG_TBL_INC 0x10
-@@ -39,6 +44,17 @@
- #define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
-
-+#define PPE_PORT_EG_VLAN_ADDR 0x20020
-+#define PPE_PORT_EG_VLAN_NUM 8
-+#define PPE_PORT_EG_VLAN_INC 4
-+#define PPE_PORT_EG_VLAN_VLAN_TYPE BIT(0)
-+#define PPE_PORT_EG_VLAN_CTAG_MODE GENMASK(2, 1)
-+#define PPE_PORT_EG_VLAN_STAG_MODE GENMASK(4, 3)
-+#define PPE_PORT_EG_VLAN_VSI_TAG_MODE_EN BIT(5)
-+#define PPE_PORT_EG_VLAN_PCP_PROP_CMD BIT(6)
-+#define PPE_PORT_EG_VLAN_DEI_PROP_CMD BIT(7)
-+#define PPE_PORT_EG_VLAN_TX_COUNTING_EN BIT(8)
-+
- #define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
- #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
-
-@@ -63,6 +79,40 @@
- #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
-
-+#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
-+#define PPE_MC_MTU_CTRL_TBL_NUM 8
-+#define PPE_MC_MTU_CTRL_TBL_INC 4
-+#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
-+#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
-+#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
-+
-+/* PPE port control configuration, the MTU and MRU configs. */
-+#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
-+#define PPE_MRU_MTU_CTRL_TBL_NUM 256
-+#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
-+#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
-+#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
-+#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
-+#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
-+#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
-+#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
-+#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
-+#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
-+#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
-+
-+#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU)
-+#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU_CMD)
-+#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU)
-+#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU_CMD)
-+#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_RX_CNT_EN)
-+#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_TX_CNT_EN)
-+
- #define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
- #define PPE_IN_L2_SERVICE_TBL_NUM 256
- #define PPE_IN_L2_SERVICE_TBL_INC 0x10
+++ /dev/null
-From b052daae2f22a7a7fcfe981598444c3f5fb370b4 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 14:52:13 +0800
-Subject: [PATCH 25/50] net: ethernet: qualcomm: Add PPE RSS hash config
-
-PPE RSS hash is generated by the configured seed based on the
-packet content, which is used to select queue and can also be
-passed to EDMA RX descriptor.
-
-Change-Id: If02cb25aa81a3afb0f3d68b2a5a354bd6cee28b8
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 182 +++++++++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 36 ++++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 47 +++++
- 3 files changed, 263 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -1282,6 +1282,143 @@ int ppe_counter_set(struct ppe_device *p
- val);
- }
-
-+static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
-+ struct ppe_rss_hash_cfg cfg)
-+{
-+ u32 reg, val;
-+
-+ switch (index) {
-+ case 0:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sip_mix[0]);
-+ break;
-+ case 1:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dip_mix[0]);
-+ break;
-+ case 2:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_protocol_mix);
-+ break;
-+ case 3:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dport_mix);
-+ break;
-+ case 4:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sport_mix);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
-+static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
-+ struct ppe_rss_hash_cfg cfg)
-+{
-+ u32 reg, val;
-+
-+ switch (index) {
-+ case 0 ... 3:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sip_mix[index]);
-+ break;
-+ case 4 ... 7:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dip_mix[index - 4]);
-+ break;
-+ case 8:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_protocol_mix);
-+ break;
-+ case 9:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dport_mix);
-+ break;
-+ case 10:
-+ val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sport_mix);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
-+
-+ return regmap_write(ppe_dev->regmap, reg, val);
-+}
-+
-+/**
-+ * ppe_rss_hash_config_set - Set PPE RSS hash seed
-+ * @ppe_dev: PPE device
-+ * @mode: Packet format mode
-+ * @hash_cfg: RSS hash configuration
-+ *
-+ * PPE RSS hash seed is configured based on the packet format.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
-+ struct ppe_rss_hash_cfg cfg)
-+{
-+ u32 val, reg;
-+ int i, ret;
-+
-+ if (mode & PPE_RSS_HASH_MODE_IPV4) {
-+ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
-+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
-+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
-+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_NUM; i++) {
-+ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_NUM; i++) {
-+ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
-+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
-+ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+
-+ if (mode & PPE_RSS_HASH_MODE_IPV6) {
-+ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
-+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
-+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
-+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ for (i = 0; i < PPE_RSS_HASH_MIX_NUM; i++) {
-+ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ for (i = 0; i < PPE_RSS_HASH_FIN_NUM; i++) {
-+ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
-+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
-+ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
-+
-+ ret = regmap_write(ppe_dev->regmap, reg, val);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -1324,7 +1461,7 @@ static int ppe_config_bm_threshold(struc
- return regmap_update_bits(ppe_dev->regmap, reg,
- PPE_BM_PORT_FC_MODE_EN,
- val);
--}
-+};
-
- /* Configure the buffer threshold for the port flow control function. */
- static int ppe_config_bm(struct ppe_device *ppe_dev)
-@@ -1744,6 +1881,43 @@ static int ppe_port_ctrl_init(struct ppe
- return ppe_counter_set(ppe_dev, 0, true);
- }
-
-+/* Initialize PPE RSS hash configuration, the RSS hash configs decides the
-+ * random hash value generated, which is used to generate the queue offset.
-+ */
-+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
-+{
-+ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
-+ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
-+ struct ppe_rss_hash_cfg hash_cfg;
-+ int i, ret;
-+
-+ hash_cfg.hash_seed = get_random_u32();
-+ hash_cfg.hash_mask = 0xfff;
-+ hash_cfg.hash_fragment_mode = false;
-+
-+ for (i = 0; i < ARRAY_SIZE(fins); i++) {
-+ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
-+ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
-+ }
-+
-+ hash_cfg.hash_protocol_mix = 0x13;
-+ hash_cfg.hash_dport_mix = 0xb;
-+ hash_cfg.hash_sport_mix = 0x13;
-+ hash_cfg.hash_sip_mix[0] = 0x13;
-+ hash_cfg.hash_dip_mix[0] = 0xb;
-+
-+ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
-+ if (ret)
-+ return ret;
-+
-+ for (i = 0; i < ARRAY_SIZE(ips); i++) {
-+ hash_cfg.hash_sip_mix[i] = ips[i];
-+ hash_cfg.hash_dip_mix[i] = ips[i];
-+ }
-+
-+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
-+}
-+
- /* Initialize PPE device to handle traffic correctly. */
- static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
- {
-@@ -1757,7 +1931,11 @@ static int ppe_dev_hw_init(struct ppe_de
- if (ret)
- return ret;
-
-- return ppe_port_ctrl_init(ppe_dev);
-+ ret = ppe_port_ctrl_init(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_rss_hash_init(ppe_dev);
- }
-
- int ppe_hw_config(struct ppe_device *ppe_dev)
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -15,6 +15,11 @@
- #define PPE_QUEUE_BASE_CPU_CODE 1024
- #define PPE_QUEUE_BASE_SERVICE_CODE 2048
-
-+#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
-+#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
-+#define PPE_RSS_HASH_IP_LENGTH 4
-+#define PPE_RSS_HASH_TUPLES 5
-+
- /**
- * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
- * @flow_id: PPE flow ID.
-@@ -202,6 +207,35 @@ enum ppe_action_type {
- PPE_ACTION_REDIRECT_TO_CPU = 3,
- };
-
-+/**
-+ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
-+ * @hash_mask: Mask of the generated hash value.
-+ * @hash_fragment_mode: Mode of the fragment packet for 3 tuples.
-+ * @hash_seed: Seed to generate RSS hash.
-+ * @hash_sip_mix: Source IP selection.
-+ * @hash_dip_mix: Destination IP selection.
-+ * @hash_protocol_mix: Protocol selection.
-+ * @hash_sport_mix: Source L4 port selection.
-+ * @hash_sport_mix: Destination L4 port selection.
-+ * @hash_fin_inner: RSS hash value first selection.
-+ * @hash_fin_outer: RSS hash value second selection.
-+ *
-+ * PPE RSS hash value is generated based on the RSS hash configuration
-+ * with the received packet.
-+ */
-+struct ppe_rss_hash_cfg {
-+ u32 hash_mask;
-+ bool hash_fragment_mode;
-+ u32 hash_seed;
-+ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
-+ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
-+ u8 hash_protocol_mix;
-+ u8 hash_sport_mix;
-+ u8 hash_dport_mix;
-+ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
-+ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
-+};
-+
- int ppe_hw_config(struct ppe_device *ppe_dev);
- int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
- int node_id, bool flow_level, int port,
-@@ -227,4 +261,6 @@ int ppe_servcode_config_set(struct ppe_d
- int servcode,
- struct ppe_servcode_cfg cfg);
- int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
-+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
-+ struct ppe_rss_hash_cfg hash_cfg);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -23,6 +23,53 @@
- #define PPE_RX_FIFO_CFG_INC 4
- #define PPE_RX_FIFO_CFG_THRSH GENMASK(2, 0)
-
-+/* RSS configs contributes to the random RSS hash value generated, which
-+ * is used to configure the queue offset.
-+ */
-+#define PPE_RSS_HASH_MASK_ADDR 0xb4318
-+#define PPE_RSS_HASH_MASK_NUM 1
-+#define PPE_RSS_HASH_MASK_INC 4
-+#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
-+#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
-+
-+#define PPE_RSS_HASH_SEED_ADDR 0xb431c
-+#define PPE_RSS_HASH_SEED_NUM 1
-+#define PPE_RSS_HASH_SEED_INC 4
-+#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
-+
-+#define PPE_RSS_HASH_MIX_ADDR 0xb4320
-+#define PPE_RSS_HASH_MIX_NUM 11
-+#define PPE_RSS_HASH_MIX_INC 4
-+#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
-+
-+#define PPE_RSS_HASH_FIN_ADDR 0xb4350
-+#define PPE_RSS_HASH_FIN_NUM 5
-+#define PPE_RSS_HASH_FIN_INC 4
-+#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
-+#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
-+
-+#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
-+#define PPE_RSS_HASH_MASK_IPV4_NUM 1
-+#define PPE_RSS_HASH_MASK_IPV4_INC 4
-+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
-+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
-+
-+#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
-+#define PPE_RSS_HASH_SEED_IPV4_NUM 1
-+#define PPE_RSS_HASH_SEED_IPV4_INC 4
-+#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
-+
-+#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
-+#define PPE_RSS_HASH_MIX_IPV4_NUM 5
-+#define PPE_RSS_HASH_MIX_IPV4_INC 4
-+#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
-+
-+#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
-+#define PPE_RSS_HASH_FIN_IPV4_NUM 5
-+#define PPE_RSS_HASH_FIN_IPV4_INC 4
-+#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
-+#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
-+
- #define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
- #define PPE_BM_SCH_CFG_TBL_NUM 128
- #define PPE_BM_SCH_CFG_TBL_INC 0x10
+++ /dev/null
-From 809513a92e3aef6ae852b35e118408059929d6d3 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 15:44:37 +0800
-Subject: [PATCH 26/50] net: ethernet: qualcomm: Add PPE queue map function
-
-Configure the queues of CPU port mapped with the EDMA ring.
-
-All queues of CPU port are mappled to the EDMA ring 0 by default,
-which can be updated by EDMA driver.
-
-Change-Id: I87ab4117af86e4b3fe7a4b41490ba8ac71ce29ef
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_api.c | 23 ++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_api.h | 2 +
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 45 ++++++++++++++++++-
- .../net/ethernet/qualcomm/ppe/ppe_config.h | 5 +++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 5 +++
- 5 files changed, 79 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
-@@ -82,3 +82,26 @@ int ppe_edma_queue_resource_get(struct p
-
- return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
- };
-+
-+/**
-+ * ppe_edma_ring_to_queues_config - Map EDMA ring to PPE queues
-+ * @ppe_dev: PPE device
-+ * @ring_id: EDMA ring ID
-+ * @num: Number of queues mapped to EDMA ring
-+ * @queues: PPE queue IDs
-+ *
-+ * PPE queues are configured to map with the special EDMA ring ID.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
-+ int num, int queues[] __counted_by(num))
-+{
-+ u32 queue_bmap[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT] = {};
-+ int index;
-+
-+ for (index = 0; index < num; index++)
-+ queue_bmap[queues[index] / 32] |= BIT_MASK(queues[index] % 32);
-+
-+ return ppe_ring_queue_map_set(ppe_dev, ring_id, queue_bmap);
-+}
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
-@@ -55,4 +55,6 @@ int ppe_edma_queue_offset_config(struct
- int index, int queue_offset);
- int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
- int *res_start, int *res_end);
-+int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
-+ int num, int queues[] __counted_by(num));
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -1419,6 +1419,28 @@ int ppe_rss_hash_config_set(struct ppe_d
- return 0;
- }
-
-+/**
-+ * ppe_ring_queue_map_set - Set PPE queue mapped with EDMA ring
-+ * @ppe_dev: PPE device
-+ * @ring_id: EDMA ring ID
-+ * @queue_map: Queue bit map
-+ *
-+ * PPE queue is configured to use the special Ring.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
-+{
-+ u32 reg, queue_bitmap_val[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT];
-+
-+ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
-+ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
-+
-+ return regmap_bulk_write(ppe_dev->regmap, reg,
-+ queue_bitmap_val,
-+ ARRAY_SIZE(queue_bitmap_val));
-+}
-+
- static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
- struct ppe_bm_port_config port_cfg)
- {
-@@ -1918,6 +1940,23 @@ static int ppe_rss_hash_init(struct ppe_
- return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
- }
-
-+/* Initialize queues of CPU port mapped with EDMA ring 0. */
-+static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
-+{
-+ u32 queue_bmap[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT] = {};
-+ int ret, queue_id, queue_max;
-+
-+ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
-+ &queue_id, &queue_max);
-+ if (ret)
-+ return ret;
-+
-+ for (; queue_id <= queue_max; queue_id++)
-+ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
-+
-+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
-+}
-+
- /* Initialize PPE device to handle traffic correctly. */
- static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
- {
-@@ -1935,7 +1974,11 @@ static int ppe_dev_hw_init(struct ppe_de
- if (ret)
- return ret;
-
-- return ppe_rss_hash_init(ppe_dev);
-+ ret = ppe_rss_hash_init(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_queues_to_ring_init(ppe_dev);
- }
-
- int ppe_hw_config(struct ppe_device *ppe_dev)
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
-@@ -20,6 +20,8 @@
- #define PPE_RSS_HASH_IP_LENGTH 4
- #define PPE_RSS_HASH_TUPLES 5
-
-+#define PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT 10
-+
- /**
- * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
- * @flow_id: PPE flow ID.
-@@ -263,4 +265,7 @@ int ppe_servcode_config_set(struct ppe_d
- int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
- int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
- struct ppe_rss_hash_cfg hash_cfg);
-+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
-+ int ring_id,
-+ u32 *queue_map);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -212,6 +212,11 @@
- #define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
- #define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
-
-+/* PPE queue bitmap. */
-+#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
-+#define PPE_RING_Q_MAP_TBL_NUM 24
-+#define PPE_RING_Q_MAP_TBL_INC 0x40
-+
- #define PPE_DEQ_OPR_TBL_ADDR 0x430000
- #define PPE_DEQ_OPR_TBL_NUM 300
- #define PPE_DEQ_OPR_TBL_INC 0x10
+++ /dev/null
-From 244012f3f879d4709be68e7ddabc064268bbd69e Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Thu, 28 Dec 2023 00:38:08 +0800
-Subject: [PATCH 27/50] net: ethernet: qualcomm: Add PPE L2 bridge
- initialization
-
-The per-port L2 bridge settings are initialized as follows:
-For PPE CPU port, the PPE bridge Tx is enabled and FDB learn is
-disabled. For PPE physical port, the PPE bridge Tx is disabled
-and FDB learn is enabled by default and the L2 forward action
-is initialized as forward to CPU port.
-
-Change-Id: Ida42464f1d5e53583a434a11b19e6501c649d44e
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- .../net/ethernet/qualcomm/ppe/ppe_config.c | 68 ++++++++++++++++++-
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 54 +++++++++++++++
- 2 files changed, 121 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
-@@ -1957,6 +1957,68 @@ static int ppe_queues_to_ring_init(struc
- return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
- }
-
-+/* Initialize PPE bridge configuration. */
-+static int ppe_bridge_init(struct ppe_device *ppe_dev)
-+{
-+ u32 reg, mask, port_cfg[4], vsi_cfg[2];
-+ int ret, i;
-+
-+ /* CPU port0 enable bridge Tx and disable FDB new address
-+ * learning and station move address learning.
-+ */
-+ mask = PPE_PORT_BRIDGE_TXMAC_EN;
-+ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
-+ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ PPE_PORT_BRIDGE_CTRL_ADDR,
-+ mask,
-+ PPE_PORT_BRIDGE_TXMAC_EN);
-+ if (ret)
-+ return ret;
-+
-+ for (i = 1; i < ppe_dev->num_ports; i++) {
-+ /* Set Invalid VSI forwarding to CPU port0 if no VSI
-+ * is assigned to the port.
-+ */
-+ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ port_cfg, ARRAY_SIZE(port_cfg));
-+
-+ if (ret)
-+ return ret;
-+
-+ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
-+ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
-+
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ port_cfg, ARRAY_SIZE(port_cfg));
-+ if (ret)
-+ return ret;
-+ }
-+
-+ for (i = 0; i < PPE_VSI_TBL_NUM; i++) {
-+ /* Enable VSI bridge forward address learning and set VSI
-+ * forward member includes CPU port0.
-+ */
-+ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
-+ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
-+ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
-+ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
-+ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
-+ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
-+ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
-+ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
-+
-+ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
-+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
-+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
- /* Initialize PPE device to handle traffic correctly. */
- static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
- {
-@@ -1978,7 +2040,11 @@ static int ppe_dev_hw_init(struct ppe_de
- if (ret)
- return ret;
-
-- return ppe_queues_to_ring_init(ppe_dev);
-+ ret = ppe_queues_to_ring_init(ppe_dev);
-+ if (ret)
-+ return ret;
-+
-+ return ppe_bridge_init(ppe_dev);
- }
-
- int ppe_hw_config(struct ppe_device *ppe_dev)
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -126,6 +126,18 @@
- #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
-
-+/* PPE port bridge configuration */
-+#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
-+#define PPE_PORT_BRIDGE_CTRL_NUM 8
-+#define PPE_PORT_BRIDGE_CTRL_INC 4
-+#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
-+#define PPE_PORT_BRIDGE_NEW_FWD_CMD GENMASK(2, 1)
-+#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
-+#define PPE_PORT_BRIDGE_STA_MOVE_FWD_CMD GENMASK(5, 4)
-+#define PPE_PORT_BRIDGE_ISOLATION_BITMAP GENMASK(15, 8)
-+#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
-+#define PPE_PORT_BRIDGE_PROMISC_EN BIT(17)
-+
- #define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
- #define PPE_MC_MTU_CTRL_TBL_NUM 8
- #define PPE_MC_MTU_CTRL_TBL_INC 4
-@@ -133,6 +145,36 @@
- #define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
- #define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
-
-+/* PPE VSI configurations */
-+#define PPE_VSI_TBL_ADDR 0x63800
-+#define PPE_VSI_TBL_NUM 64
-+#define PPE_VSI_TBL_INC 0x10
-+#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
-+#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
-+#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
-+#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
-+#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
-+#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
-+#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
-+#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
-+
-+#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_MEMBER_PORT_BITMAP)
-+#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UUC_BITMAP)
-+#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UMC_BITMAP)
-+#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_BC_BITMAP)
-+#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_LRN_EN)
-+#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_FWD_CMD)
-+#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_LRN_EN)
-+#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_FWD_CMD)
-+
- /* PPE port control configuration, the MTU and MRU configs. */
- #define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
- #define PPE_MRU_MTU_CTRL_TBL_NUM 256
-@@ -170,6 +212,18 @@
- #define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
- #define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
-
-+/* L2 Port configurations */
-+#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
-+#define PPE_L2_VP_PORT_TBL_NUM 256
-+#define PPE_L2_VP_PORT_TBL_INC 0x10
-+#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
-+#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
-+
-+#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN)
-+#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
-+ u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
-+
- #define PPE_TL_SERVICE_TBL_ADDR 0x306000
- #define PPE_TL_SERVICE_TBL_NUM 256
- #define PPE_TL_SERVICE_TBL_INC 4
+++ /dev/null
-From 45fb5b1303af9b7341c9a9fd692248aa67f5dc63 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Wed, 27 Dec 2023 17:04:08 +0800
-Subject: [PATCH 28/50] net: ethernet: qualcomm: Add PPE debugfs support
-
-The PPE hardware counter is exposed by the file
-entry "/sys/kernel/debug/ppe/packet_counter".
-
-Change-Id: I58251fe00a89f78ee6c410af1d2380270e55a176
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/ppe.c | 11 +
- drivers/net/ethernet/qualcomm/ppe/ppe.h | 3 +
- .../net/ethernet/qualcomm/ppe/ppe_debugfs.c | 725 ++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/ppe_debugfs.h | 16 +
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 98 +++
- 6 files changed, 854 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -4,4 +4,4 @@
- #
-
- obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
--qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o
-+qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
-@@ -16,6 +16,7 @@
-
- #include "ppe.h"
- #include "ppe_config.h"
-+#include "ppe_debugfs.h"
-
- #define PPE_PORT_MAX 8
- #define PPE_CLK_RATE 353000000
-@@ -206,11 +207,20 @@ static int qcom_ppe_probe(struct platfor
- if (ret)
- return dev_err_probe(dev, ret, "PPE HW config failed\n");
-
-+ ppe_debugfs_setup(ppe_dev);
- platform_set_drvdata(pdev, ppe_dev);
-
- return 0;
- }
-
-+static void qcom_ppe_remove(struct platform_device *pdev)
-+{
-+ struct ppe_device *ppe_dev;
-+
-+ ppe_dev = platform_get_drvdata(pdev);
-+ ppe_debugfs_teardown(ppe_dev);
-+}
-+
- static const struct of_device_id qcom_ppe_of_match[] = {
- { .compatible = "qcom,ipq9574-ppe" },
- {},
-@@ -223,6 +233,7 @@ static struct platform_driver qcom_ppe_d
- .of_match_table = qcom_ppe_of_match,
- },
- .probe = qcom_ppe_probe,
-+ .remove_new = qcom_ppe_remove,
- };
- module_platform_driver(qcom_ppe_driver);
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
-@@ -11,6 +11,7 @@
-
- struct device;
- struct regmap;
-+struct dentry;
-
- /**
- * struct ppe_device - PPE device private data.
-@@ -18,6 +19,7 @@ struct regmap;
- * @regmap: PPE register map.
- * @clk_rate: PPE clock rate.
- * @num_ports: Number of PPE ports.
-+ * @debugfs_root: PPE debug root entry.
- * @num_icc_paths: Number of interconnect paths.
- * @icc_paths: Interconnect path array.
- *
-@@ -30,6 +32,7 @@ struct ppe_device {
- struct regmap *regmap;
- unsigned long clk_rate;
- unsigned int num_ports;
-+ struct dentry *debugfs_root;
- unsigned int num_icc_paths;
- struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
- };
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
-@@ -0,0 +1,725 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE debugfs routines for display of PPE counters useful for debug. */
-+
-+#include <linux/debugfs.h>
-+#include <linux/regmap.h>
-+#include <linux/seq_file.h>
-+
-+#include "ppe.h"
-+#include "ppe_config.h"
-+#include "ppe_debugfs.h"
-+#include "ppe_regs.h"
-+
-+#define PPE_PKT_CNT_TBL_SIZE 3
-+#define PPE_DROP_PKT_CNT_TBL_SIZE 5
-+
-+#define PREFIX_S(desc, cnt_type) \
-+ seq_printf(seq, "%-16s %16s", desc, cnt_type)
-+#define CNT_ONE_TYPE(cnt, str, index) \
-+ seq_printf(seq, "%10u(%s=%04d)", cnt, str, index)
-+#define CNT_TWO_TYPE(cnt, cnt1, str, index) \
-+ seq_printf(seq, "%10u/%u(%s=%04d)", cnt, cnt1, str, index)
-+#define CNT_CPU_CODE(cnt, index) \
-+ seq_printf(seq, "%10u(cpucode:%d)", cnt, index)
-+#define CNT_DROP_CODE(cnt, port, index) \
-+ seq_printf(seq, "%10u(port=%d),dropcode:%d", cnt, port, index)
-+
-+#define PPE_W0_PKT_CNT GENMASK(31, 0)
-+#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
-+#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
-+
-+#define PPE_GET_PKT_CNT(tbl_cfg) \
-+ u32_get_bits(*((u32 *)(tbl_cfg)), PPE_W0_PKT_CNT)
-+#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cfg) \
-+ u32_get_bits(*((u32 *)(tbl_cfg) + 0x2), PPE_W2_DROP_PKT_CNT_LOW)
-+#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cfg) \
-+ u32_get_bits(*((u32 *)(tbl_cfg) + 0x3), PPE_W3_DROP_PKT_CNT_HIGH)
-+
-+/**
-+ * enum ppe_cnt_size_type - PPE counter size type
-+ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with single register
-+ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with table of 3 words
-+ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with table of 5 words
-+ *
-+ * PPE takes the different register size to record the packet counter,
-+ * which uses single register or register table with 3 words or 5 words.
-+ * The counter with table size 5 words also records the drop counter.
-+ * There are also some other counters only occupying several bits less than
-+ * 32 bits, which is not covered by this enumeration type.
-+ */
-+enum ppe_cnt_size_type {
-+ PPE_PKT_CNT_SIZE_1WORD,
-+ PPE_PKT_CNT_SIZE_3WORD,
-+ PPE_PKT_CNT_SIZE_5WORD,
-+};
-+
-+static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
-+ enum ppe_cnt_size_type cnt_type,
-+ u32 *cnt, u32 *drop_cnt)
-+{
-+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
-+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
-+ u32 value;
-+ int ret;
-+
-+ switch (cnt_type) {
-+ case PPE_PKT_CNT_SIZE_1WORD:
-+ ret = regmap_read(ppe_dev->regmap, reg, &value);
-+ if (ret)
-+ return ret;
-+
-+ *cnt = value;
-+ break;
-+ case PPE_PKT_CNT_SIZE_3WORD:
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
-+ if (ret)
-+ return ret;
-+
-+ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
-+ break;
-+ case PPE_PKT_CNT_SIZE_5WORD:
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
-+ if (ret)
-+ return ret;
-+
-+ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
-+
-+ /* Drop counter with low 24 bits. */
-+ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
-+ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
-+
-+ /* Drop counter with high 8 bits. */
-+ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
-+ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
-+ enum ppe_cnt_size_type cnt_type)
-+{
-+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
-+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
-+
-+ switch (cnt_type) {
-+ case PPE_PKT_CNT_SIZE_1WORD:
-+ regmap_write(ppe_dev->regmap, reg, 0);
-+ break;
-+ case PPE_PKT_CNT_SIZE_3WORD:
-+ regmap_bulk_write(ppe_dev->regmap, reg,
-+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
-+ break;
-+ case PPE_PKT_CNT_SIZE_5WORD:
-+ regmap_bulk_write(ppe_dev->regmap, reg,
-+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
-+ break;
-+ }
-+}
-+
-+/* The number of packets dropped because of no buffer available. */
-+static void ppe_prx_drop_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ int ret, i, tag = 0;
-+ u32 reg, drop_cnt;
-+
-+ PREFIX_S("PRX_DROP_CNT", "SILENT_DROP:");
-+ for (i = 0; i < PPE_DROP_CNT_NUM; i++) {
-+ reg = PPE_DROP_CNT_ADDR + i * PPE_DROP_CNT_INC;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
-+ &drop_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (drop_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_ONE_TYPE(drop_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet dropped because of no enough buffer to cache
-+ * packet, some buffer allocated for the part of packet.
-+ */
-+static void ppe_prx_bm_drop_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("PRX_BM_DROP_CNT", "OVERFLOW_DROP:");
-+ for (i = 0; i < PPE_DROP_STAT_NUM; i++) {
-+ reg = PPE_DROP_STAT_ADDR + PPE_DROP_STAT_INC * i;
-+
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_ONE_TYPE(pkt_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of currently occupied buffers, that can't be flushed. */
-+static void ppe_prx_bm_port_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ int used_cnt, react_cnt;
-+ int ret, i, tag = 0;
-+ u32 reg, val;
-+
-+ PREFIX_S("PRX_BM_PORT_CNT", "USED/REACT:");
-+ for (i = 0; i < PPE_BM_USED_CNT_NUM; i++) {
-+ reg = PPE_BM_USED_CNT_ADDR + i * PPE_BM_USED_CNT_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ used_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
-+
-+ reg = PPE_BM_REACT_CNT_ADDR + i * PPE_BM_REACT_CNT_INC;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ react_cnt = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
-+
-+ if (used_cnt > 0 || react_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(used_cnt, react_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of ingress packets. */
-+static void ppe_ipx_pkt_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, cnt, tunnel_cnt;
-+ int i, ret, tag = 0;
-+
-+ PREFIX_S("IPR_PKT_CNT", "TPRX/IPRX:");
-+ for (i = 0; i < PPE_IPR_PKT_CNT_NUM; i++) {
-+ reg = PPE_TPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
-+ &tunnel_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ reg = PPE_IPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
-+ &cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (tunnel_cnt > 0 || cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(tunnel_cnt, cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet received or dropped on the ingress direction. */
-+static void ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt, drop_cnt;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("PORT_RX_CNT", "RX/RX_DROP:");
-+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_NUM; i++) {
-+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
-+ &pkt_cnt, &drop_cnt);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet received or dropped by the port. */
-+static void ppe_vp_rx_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt, drop_cnt;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("VPORT_RX_CNT", "RX/RX_DROP:");
-+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_NUM; i++) {
-+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
-+ &pkt_cnt, &drop_cnt);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet received or dropped by layer 2 processing. */
-+static void ppe_pre_l2_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt, drop_cnt;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("PRE_L2_CNT", "RX/RX_DROP:");
-+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_NUM; i++) {
-+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
-+ &pkt_cnt, &drop_cnt);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, drop_cnt, "vsi", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet received for VLAN handler. */
-+static void ppe_vlan_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("VLAN_CNT", "RX:");
-+ for (i = 0; i < PPE_VLAN_CNT_TBL_NUM; i++) {
-+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
-+
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_ONE_TYPE(pkt_cnt, "vsi", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet forwarded to CPU handler. */
-+static void ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0;
-+ int ret, i;
-+
-+ PREFIX_S("CPU_CODE_CNT", "CODE:");
-+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_NUM; i++) {
-+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
-+
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (!pkt_cnt)
-+ continue;
-+
-+ if (i < 256)
-+ CNT_CPU_CODE(pkt_cnt, i);
-+ else
-+ CNT_DROP_CODE(pkt_cnt, (i - 256) % 8, (i - 256) / 8);
-+
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet forwarded by VLAN on the egress direction. */
-+static void ppe_eg_vsi_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("EG_VSI_CNT", "TX:");
-+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
-+
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_ONE_TYPE(pkt_cnt, "vsi", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet trasmitted or dropped by port. */
-+static void ppe_vp_tx_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("VPORT_TX_CNT", "TX/TX_DROP:");
-+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &drop_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0 || drop_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet trasmitted or dropped on the egress direction. */
-+static void ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("PORT_TX_CNT", "TX/TX_DROP:");
-+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &drop_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (pkt_cnt > 0 || drop_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* The number of packet trasmitted or pended by the PPE queue. */
-+static void ppe_queue_tx_counter_get(struct ppe_device *ppe_dev,
-+ struct seq_file *seq)
-+{
-+ u32 reg, val, pkt_cnt = 0, pend_cnt = 0;
-+ int ret, i, tag = 0;
-+
-+ PREFIX_S("QUEUE_TX_CNT", "TX/PEND:");
-+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
-+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
-+ &pkt_cnt, NULL);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ if (i < PPE_AC_UNI_QUEUE_CFG_TBL_NUM) {
-+ reg = PPE_AC_UNI_QUEUE_CNT_TBL_ADDR + PPE_AC_UNI_QUEUE_CNT_TBL_INC * i;
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ pend_cnt = FIELD_GET(PPE_AC_UNI_QUEUE_CNT_TBL_PEND_CNT, val);
-+ } else {
-+ reg = PPE_AC_MUL_QUEUE_CNT_TBL_ADDR +
-+ PPE_AC_MUL_QUEUE_CNT_TBL_INC * (i - PPE_AC_UNI_QUEUE_CFG_TBL_NUM);
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ seq_printf(seq, "ERROR %d\n", ret);
-+ return;
-+ }
-+
-+ pend_cnt = FIELD_GET(PPE_AC_MUL_QUEUE_CNT_TBL_PEND_CNT, val);
-+ }
-+
-+ if (pkt_cnt > 0 || pend_cnt > 0) {
-+ tag++;
-+ if (!(tag % 4)) {
-+ seq_putc(seq, '\n');
-+ PREFIX_S("", "");
-+ }
-+
-+ CNT_TWO_TYPE(pkt_cnt, pend_cnt, "queue", i);
-+ }
-+ }
-+
-+ seq_putc(seq, '\n');
-+}
-+
-+/* Display the packet counter of PPE. */
-+static int ppe_packet_counter_show(struct seq_file *seq, void *v)
-+{
-+ struct ppe_device *ppe_dev = seq->private;
-+
-+ ppe_prx_drop_counter_get(ppe_dev, seq);
-+ ppe_prx_bm_drop_counter_get(ppe_dev, seq);
-+ ppe_prx_bm_port_counter_get(ppe_dev, seq);
-+ ppe_ipx_pkt_counter_get(ppe_dev, seq);
-+ ppe_port_rx_counter_get(ppe_dev, seq);
-+ ppe_vp_rx_counter_get(ppe_dev, seq);
-+ ppe_pre_l2_counter_get(ppe_dev, seq);
-+ ppe_vlan_counter_get(ppe_dev, seq);
-+ ppe_cpu_code_counter_get(ppe_dev, seq);
-+ ppe_eg_vsi_counter_get(ppe_dev, seq);
-+ ppe_vp_tx_counter_get(ppe_dev, seq);
-+ ppe_port_tx_counter_get(ppe_dev, seq);
-+ ppe_queue_tx_counter_get(ppe_dev, seq);
-+
-+ return 0;
-+}
-+
-+static int ppe_packet_counter_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, ppe_packet_counter_show, inode->i_private);
-+}
-+
-+static ssize_t ppe_packet_counter_clear(struct file *file,
-+ const char __user *buf,
-+ size_t count, loff_t *pos)
-+{
-+ struct ppe_device *ppe_dev = file_inode(file)->i_private;
-+ u32 reg;
-+ int i;
-+
-+ for (i = 0; i < PPE_DROP_CNT_NUM; i++) {
-+ reg = PPE_DROP_CNT_ADDR + i * PPE_DROP_CNT_INC;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
-+ }
-+
-+ for (i = 0; i < PPE_DROP_STAT_NUM; i++) {
-+ reg = PPE_DROP_STAT_ADDR + PPE_DROP_STAT_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_IPR_PKT_CNT_NUM; i++) {
-+ reg = PPE_IPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
-+
-+ reg = PPE_TPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
-+ }
-+
-+ for (i = 0; i < PPE_VLAN_CNT_TBL_NUM; i++) {
-+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_NUM; i++) {
-+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
-+ }
-+
-+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+
-+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+
-+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_NUM; i++) {
-+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_IN_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_OUT_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
-+
-+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_NUM; i++) {
-+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
-+ }
-+
-+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_NUM; i++) {
-+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
-+ }
-+
-+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_NUM; i++) {
-+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
-+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
-+ }
-+
-+ return count;
-+}
-+
-+static const struct file_operations ppe_debugfs_packet_counter_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ppe_packet_counter_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+ .write = ppe_packet_counter_clear,
-+};
-+
-+void ppe_debugfs_setup(struct ppe_device *ppe_dev)
-+{
-+ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
-+ debugfs_create_file("packet_counter", 0444,
-+ ppe_dev->debugfs_root,
-+ ppe_dev,
-+ &ppe_debugfs_packet_counter_fops);
-+}
-+
-+void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
-+{
-+ debugfs_remove_recursive(ppe_dev->debugfs_root);
-+ ppe_dev->debugfs_root = NULL;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
-@@ -0,0 +1,16 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE debugfs counters setup. */
-+
-+#ifndef __PPE_DEBUGFS_H__
-+#define __PPE_DEBUGFS_H__
-+
-+#include "ppe.h"
-+
-+void ppe_debugfs_setup(struct ppe_device *ppe_dev);
-+void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
-+
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -23,6 +23,43 @@
- #define PPE_RX_FIFO_CFG_INC 4
- #define PPE_RX_FIFO_CFG_THRSH GENMASK(2, 0)
-
-+#define PPE_DROP_CNT_ADDR 0xb024
-+#define PPE_DROP_CNT_NUM 8
-+#define PPE_DROP_CNT_INC 4
-+
-+/* BM port drop counter */
-+#define PPE_DROP_STAT_ADDR 0xe000
-+#define PPE_DROP_STAT_NUM 30
-+#define PPE_DROP_STAT_INC 0x10
-+
-+#define PPE_EPE_DBG_IN_CNT_ADDR 0x26054
-+#define PPE_EPE_DBG_IN_CNT_NUM 1
-+#define PPE_EPE_DBG_IN_CNT_INC 0x4
-+
-+#define PPE_EPE_DBG_OUT_CNT_ADDR 0x26070
-+#define PPE_EPE_DBG_OUT_CNT_NUM 1
-+#define PPE_EPE_DBG_OUT_CNT_INC 0x4
-+
-+/* Egress VLAN counter */
-+#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
-+#define PPE_EG_VSI_COUNTER_TBL_NUM 64
-+#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
-+
-+/* Port TX counter */
-+#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
-+#define PPE_PORT_TX_COUNTER_TBL_NUM 8
-+#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
-+
-+/* Virtual port TX counter */
-+#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
-+#define PPE_VPORT_TX_COUNTER_TBL_NUM 256
-+#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
-+
-+/* Queue counter */
-+#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
-+#define PPE_QUEUE_TX_COUNTER_TBL_NUM 300
-+#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
-+
- /* RSS configs contributes to the random RSS hash value generated, which
- * is used to configure the queue offset.
- */
-@@ -224,6 +261,47 @@
- #define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
- u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
-
-+/* Port RX and RX drop counter */
-+#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
-+#define PPE_PORT_RX_CNT_TBL_NUM 256
-+#define PPE_PORT_RX_CNT_TBL_INC 0x20
-+
-+/* Physical port RX and RX drop counter */
-+#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
-+#define PPE_PHY_PORT_RX_CNT_TBL_NUM 8
-+#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
-+
-+/* Counter for the packet to CPU port */
-+#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
-+#define PPE_DROP_CPU_CNT_TBL_NUM 1280
-+#define PPE_DROP_CPU_CNT_TBL_INC 0x10
-+
-+/* VLAN counter */
-+#define PPE_VLAN_CNT_TBL_ADDR 0x178000
-+#define PPE_VLAN_CNT_TBL_NUM 64
-+#define PPE_VLAN_CNT_TBL_INC 0x10
-+
-+/* PPE L2 counter */
-+#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
-+#define PPE_PRE_L2_CNT_TBL_NUM 64
-+#define PPE_PRE_L2_CNT_TBL_INC 0x20
-+
-+/* Port TX drop counter */
-+#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
-+#define PPE_PORT_TX_DROP_CNT_TBL_NUM 8
-+#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
-+
-+/* Virtual port TX counter */
-+#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
-+#define PPE_VPORT_TX_DROP_CNT_TBL_NUM 256
-+#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
-+
-+#define PPE_TPR_PKT_CNT_ADDR 0x1d0080
-+
-+#define PPE_IPR_PKT_CNT_ADDR 0x1e0080
-+#define PPE_IPR_PKT_CNT_NUM 8
-+#define PPE_IPR_PKT_CNT_INC 4
-+
- #define PPE_TL_SERVICE_TBL_ADDR 0x306000
- #define PPE_TL_SERVICE_TBL_NUM 256
- #define PPE_TL_SERVICE_TBL_INC 4
-@@ -325,6 +403,16 @@
- #define PPE_BM_PORT_GROUP_ID_INC 0x4
- #define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
-
-+#define PPE_BM_USED_CNT_ADDR 0x6001c0
-+#define PPE_BM_USED_CNT_NUM 15
-+#define PPE_BM_USED_CNT_INC 0x4
-+#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
-+
-+#define PPE_BM_REACT_CNT_ADDR 0x600240
-+#define PPE_BM_REACT_CNT_NUM 15
-+#define PPE_BM_REACT_CNT_INC 0x4
-+#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
-+
- #define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
- #define PPE_BM_SHARED_GROUP_CFG_INC 0x4
- #define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
-@@ -442,6 +530,16 @@
- #define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
- u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
-
-+#define PPE_AC_UNI_QUEUE_CNT_TBL_ADDR 0x84e000
-+#define PPE_AC_UNI_QUEUE_CNT_TBL_NUM 256
-+#define PPE_AC_UNI_QUEUE_CNT_TBL_INC 0x10
-+#define PPE_AC_UNI_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
-+
-+#define PPE_AC_MUL_QUEUE_CNT_TBL_ADDR 0x852000
-+#define PPE_AC_MUL_QUEUE_CNT_TBL_NUM 44
-+#define PPE_AC_MUL_QUEUE_CNT_TBL_INC 0x10
-+#define PPE_AC_MUL_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
-+
- #define PPE_ENQ_OPR_TBL_ADDR 0x85c000
- #define PPE_ENQ_OPR_TBL_NUM 300
- #define PPE_ENQ_OPR_TBL_INC 0x10
+++ /dev/null
-From 028ed86f08a4fdf25213af5f5afd63b30fb7b029 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Thu, 29 Feb 2024 16:59:53 +0800
-Subject: [PATCH 32/50] net: ethernet: qualcomm: Add phylink support for PPE
- MAC ports
-
-Add MAC initialization and phylink functions for PPE MAC ports.
-
-Change-Id: I39dcba671732392bcfa2e734473fd083989bfbec
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/ethernet/qualcomm/Kconfig | 3 +
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/ppe.c | 9 +
- drivers/net/ethernet/qualcomm/ppe/ppe.h | 2 +
- drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 728 +++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 76 ++
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 123 ++++
- 7 files changed, 942 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_port.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-
---- a/drivers/net/ethernet/qualcomm/Kconfig
-+++ b/drivers/net/ethernet/qualcomm/Kconfig
-@@ -66,6 +66,9 @@ config QCOM_PPE
- depends on HAS_IOMEM && OF
- depends on COMMON_CLK
- select REGMAP_MMIO
-+ select PHYLINK
-+ select PCS_QCOM_IPQ_UNIPHY
-+ select SFP
- help
- This driver supports the Qualcomm Technologies, Inc. packet
- process engine (PPE) available with IPQ SoC. The PPE houses
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -4,4 +4,4 @@
- #
-
- obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
--qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o
-+qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
-@@ -17,6 +17,7 @@
- #include "ppe.h"
- #include "ppe_config.h"
- #include "ppe_debugfs.h"
-+#include "ppe_port.h"
-
- #define PPE_PORT_MAX 8
- #define PPE_CLK_RATE 353000000
-@@ -207,6 +208,11 @@ static int qcom_ppe_probe(struct platfor
- if (ret)
- return dev_err_probe(dev, ret, "PPE HW config failed\n");
-
-+ ret = ppe_port_mac_init(ppe_dev);
-+ if (ret)
-+ return dev_err_probe(dev, ret,
-+ "PPE Port MAC initialization failed\n");
-+
- ppe_debugfs_setup(ppe_dev);
- platform_set_drvdata(pdev, ppe_dev);
-
-@@ -219,6 +225,9 @@ static void qcom_ppe_remove(struct platf
-
- ppe_dev = platform_get_drvdata(pdev);
- ppe_debugfs_teardown(ppe_dev);
-+ ppe_port_mac_deinit(ppe_dev);
-+
-+ platform_set_drvdata(pdev, NULL);
- }
-
- static const struct of_device_id qcom_ppe_of_match[] = {
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
-@@ -20,6 +20,7 @@ struct dentry;
- * @clk_rate: PPE clock rate.
- * @num_ports: Number of PPE ports.
- * @debugfs_root: PPE debug root entry.
-+ * @ports: PPE MAC ports.
- * @num_icc_paths: Number of interconnect paths.
- * @icc_paths: Interconnect path array.
- *
-@@ -33,6 +34,7 @@ struct ppe_device {
- unsigned long clk_rate;
- unsigned int num_ports;
- struct dentry *debugfs_root;
-+ struct ppe_ports *ports;
- unsigned int num_icc_paths;
- struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
- };
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-@@ -0,0 +1,728 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* PPE Port MAC initialization and PPE port MAC functions. */
-+
-+#include <linux/clk.h>
-+#include <linux/of_net.h>
-+#include <linux/pcs/pcs-qcom-ipq-uniphy.h>
-+#include <linux/phylink.h>
-+#include <linux/reset.h>
-+#include <linux/regmap.h>
-+#include <linux/rtnetlink.h>
-+
-+#include "ppe.h"
-+#include "ppe_port.h"
-+#include "ppe_regs.h"
-+
-+/* PPE MAC max frame size which including 4bytes FCS */
-+#define PPE_PORT_MAC_MAX_FRAME_SIZE 0x3000
-+
-+/* PPE BM port start for PPE MAC ports */
-+#define PPE_BM_PORT_MAC_START 7
-+
-+/* PPE port clock and reset name */
-+static const char * const ppe_port_clk_rst_name[] = {
-+ [PPE_PORT_CLK_RST_MAC] = "port_mac",
-+ [PPE_PORT_CLK_RST_RX] = "port_rx",
-+ [PPE_PORT_CLK_RST_TX] = "port_tx",
-+};
-+
-+/* PPE port and MAC reset */
-+static int ppe_port_mac_reset(struct ppe_port *ppe_port)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret;
-+
-+ ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_MAC]);
-+ if (ret)
-+ goto error;
-+
-+ ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_RX]);
-+ if (ret)
-+ goto error;
-+
-+ ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_TX]);
-+ if (ret)
-+ goto error;
-+
-+ /* 150ms delay is required by hardware to reset PPE port and MAC */
-+ msleep(150);
-+
-+ ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_MAC]);
-+ if (ret)
-+ goto error;
-+
-+ ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_RX]);
-+ if (ret)
-+ goto error;
-+
-+ ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_TX]);
-+ if (ret)
-+ goto error;
-+
-+ return ret;
-+
-+error:
-+ dev_err(ppe_dev->dev, "%s: port %d reset fail %d\n",
-+ __func__, ppe_port->port_id, ret);
-+ return ret;
-+}
-+
-+/* PPE port MAC configuration for phylink */
-+static void ppe_port_mac_config(struct phylink_config *config,
-+ unsigned int mode,
-+ const struct phylink_link_state *state)
-+{
-+ struct ppe_port *ppe_port = container_of(config, struct ppe_port,
-+ phylink_config);
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int port = ppe_port->port_id;
-+ enum ppe_mac_type mac_type;
-+ u32 val, mask;
-+ int ret;
-+
-+ switch (state->interface) {
-+ case PHY_INTERFACE_MODE_2500BASEX:
-+ case PHY_INTERFACE_MODE_USXGMII:
-+ case PHY_INTERFACE_MODE_10GBASER:
-+ case PHY_INTERFACE_MODE_10G_QXGMII:
-+ mac_type = PPE_MAC_TYPE_XGMAC;
-+ break;
-+ case PHY_INTERFACE_MODE_QSGMII:
-+ case PHY_INTERFACE_MODE_PSGMII:
-+ case PHY_INTERFACE_MODE_SGMII:
-+ case PHY_INTERFACE_MODE_1000BASEX:
-+ mac_type = PPE_MAC_TYPE_GMAC;
-+ break;
-+ default:
-+ dev_err(ppe_dev->dev, "%s: Unsupport interface %s\n",
-+ __func__, phy_modes(state->interface));
-+ return;
-+ }
-+
-+ /* Reset Port MAC for GMAC */
-+ if (mac_type == PPE_MAC_TYPE_GMAC) {
-+ ret = ppe_port_mac_reset(ppe_port);
-+ if (ret)
-+ goto err_mac_config;
-+ }
-+
-+ /* Port mux to select GMAC or XGMAC */
-+ mask = PPE_PORT_SEL_XGMAC(port);
-+ val = mac_type == PPE_MAC_TYPE_GMAC ? 0 : mask;
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ PPE_PORT_MUX_CTRL_ADDR,
-+ mask, val);
-+ if (ret)
-+ goto err_mac_config;
-+
-+ ppe_port->mac_type = mac_type;
-+
-+ return;
-+
-+err_mac_config:
-+ dev_err(ppe_dev->dev, "%s: port %d MAC config fail %d\n",
-+ __func__, port, ret);
-+}
-+
-+/* PPE port GMAC link up configuration */
-+static int ppe_port_gmac_link_up(struct ppe_port *ppe_port, int speed,
-+ int duplex, bool tx_pause, bool rx_pause)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 reg, val;
-+
-+ /* Set GMAC speed */
-+ switch (speed) {
-+ case SPEED_1000:
-+ val = GMAC_SPEED_1000;
-+ break;
-+ case SPEED_100:
-+ val = GMAC_SPEED_100;
-+ break;
-+ case SPEED_10:
-+ val = GMAC_SPEED_10;
-+ break;
-+ default:
-+ dev_err(ppe_dev->dev, "%s: Invalid GMAC speed %s\n",
-+ __func__, phy_speed_to_str(speed));
-+ return -EINVAL;
-+ }
-+
-+ reg = PPE_PORT_GMAC_ADDR(port);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_SPEED_ADDR,
-+ GMAC_SPEED_M, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Set duplex, flow control and enable GMAC */
-+ val = GMAC_TRXEN;
-+ if (duplex == DUPLEX_FULL)
-+ val |= GMAC_DUPLEX_FULL;
-+ if (tx_pause)
-+ val |= GMAC_TXFCEN;
-+ if (rx_pause)
-+ val |= GMAC_RXFCEN;
-+
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_ENABLE_ADDR,
-+ GMAC_ENABLE_ALL, val);
-+
-+ return ret;
-+}
-+
-+/* PPE port XGMAC link up configuration */
-+static int ppe_port_xgmac_link_up(struct ppe_port *ppe_port,
-+ phy_interface_t interface,
-+ int speed, int duplex,
-+ bool tx_pause, bool rx_pause)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 reg, val;
-+
-+ /* Set XGMAC TX speed and enable TX */
-+ switch (speed) {
-+ case SPEED_10000:
-+ if (interface == PHY_INTERFACE_MODE_USXGMII)
-+ val = XGMAC_SPEED_10000_USXGMII;
-+ else
-+ val = XGMAC_SPEED_10000;
-+ break;
-+ case SPEED_5000:
-+ val = XGMAC_SPEED_5000;
-+ break;
-+ case SPEED_2500:
-+ if (interface == PHY_INTERFACE_MODE_USXGMII ||
-+ interface == PHY_INTERFACE_MODE_10G_QXGMII)
-+ val = XGMAC_SPEED_2500_USXGMII;
-+ else
-+ val = XGMAC_SPEED_2500;
-+ break;
-+ case SPEED_1000:
-+ val = XGMAC_SPEED_1000;
-+ break;
-+ case SPEED_100:
-+ val = XGMAC_SPEED_100;
-+ break;
-+ case SPEED_10:
-+ val = XGMAC_SPEED_10;
-+ break;
-+ default:
-+ dev_err(ppe_dev->dev, "%s: Invalid XGMAC speed %s\n",
-+ __func__, phy_speed_to_str(speed));
-+ return -EINVAL;
-+ }
-+
-+ reg = PPE_PORT_XGMAC_ADDR(port);
-+ val |= XGMAC_TXEN;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_CONFIG_ADDR,
-+ XGMAC_SPEED_M | XGMAC_TXEN, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Set XGMAC TX flow control */
-+ val = FIELD_PREP(XGMAC_PAUSE_TIME_M, FIELD_MAX(XGMAC_PAUSE_TIME_M));
-+ val |= tx_pause ? XGMAC_TXFCEN : 0;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_FLOW_CTRL_ADDR,
-+ XGMAC_PAUSE_TIME_M | XGMAC_TXFCEN, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Set XGMAC RX flow control */
-+ val = rx_pause ? XGMAC_RXFCEN : 0;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_FLOW_CTRL_ADDR,
-+ XGMAC_RXFCEN, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Enable XGMAC RX*/
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_CONFIG_ADDR,
-+ XGMAC_RXEN, XGMAC_RXEN);
-+
-+ return ret;
-+}
-+
-+/* PPE port MAC link up configuration for phylink */
-+static void ppe_port_mac_link_up(struct phylink_config *config,
-+ struct phy_device *phy,
-+ unsigned int mode,
-+ phy_interface_t interface,
-+ int speed, int duplex,
-+ bool tx_pause, bool rx_pause)
-+{
-+ struct ppe_port *ppe_port = container_of(config, struct ppe_port,
-+ phylink_config);
-+ enum ppe_mac_type mac_type = ppe_port->mac_type;
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 reg, val;
-+
-+ if (mac_type == PPE_MAC_TYPE_GMAC)
-+ ret = ppe_port_gmac_link_up(ppe_port,
-+ speed, duplex, tx_pause, rx_pause);
-+ else
-+ ret = ppe_port_xgmac_link_up(ppe_port, interface,
-+ speed, duplex, tx_pause, rx_pause);
-+ if (ret)
-+ goto err_port_mac_link_up;
-+
-+ /* Set PPE port BM flow control */
-+ reg = PPE_BM_PORT_FC_MODE_ADDR +
-+ PPE_BM_PORT_FC_MODE_INC * (port + PPE_BM_PORT_MAC_START);
-+ val = tx_pause ? PPE_BM_PORT_FC_MODE_EN : 0;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_BM_PORT_FC_MODE_EN, val);
-+ if (ret)
-+ goto err_port_mac_link_up;
-+
-+ /* Enable PPE port TX */
-+ reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_PORT_BRIDGE_TXMAC_EN,
-+ PPE_PORT_BRIDGE_TXMAC_EN);
-+ if (ret)
-+ goto err_port_mac_link_up;
-+
-+ return;
-+
-+err_port_mac_link_up:
-+ dev_err(ppe_dev->dev, "%s: port %d link up fail %d\n",
-+ __func__, port, ret);
-+}
-+
-+/* PPE port MAC link down configuration for phylink */
-+static void ppe_port_mac_link_down(struct phylink_config *config,
-+ unsigned int mode,
-+ phy_interface_t interface)
-+{
-+ struct ppe_port *ppe_port = container_of(config, struct ppe_port,
-+ phylink_config);
-+ enum ppe_mac_type mac_type = ppe_port->mac_type;
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 reg;
-+
-+ /* Disable PPE port TX */
-+ reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_PORT_BRIDGE_TXMAC_EN, 0);
-+ if (ret)
-+ goto err_port_mac_link_down;
-+
-+ /* Disable PPE MAC */
-+ if (mac_type == PPE_MAC_TYPE_GMAC) {
-+ reg = PPE_PORT_GMAC_ADDR(port) + GMAC_ENABLE_ADDR;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg, GMAC_TRXEN, 0);
-+ if (ret)
-+ goto err_port_mac_link_down;
-+ } else {
-+ reg = PPE_PORT_XGMAC_ADDR(port);
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ reg + XGMAC_RX_CONFIG_ADDR,
-+ XGMAC_RXEN, 0);
-+ if (ret)
-+ goto err_port_mac_link_down;
-+
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ reg + XGMAC_TX_CONFIG_ADDR,
-+ XGMAC_TXEN, 0);
-+ if (ret)
-+ goto err_port_mac_link_down;
-+ }
-+
-+ return;
-+
-+err_port_mac_link_down:
-+ dev_err(ppe_dev->dev, "%s: port %d link down fail %d\n",
-+ __func__, port, ret);
-+}
-+
-+/* PPE port MAC PCS selection for phylink */
-+static
-+struct phylink_pcs *ppe_port_mac_select_pcs(struct phylink_config *config,
-+ phy_interface_t interface)
-+{
-+ struct ppe_port *ppe_port = container_of(config, struct ppe_port,
-+ phylink_config);
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 val;
-+
-+ /* PPE port5 can connects with PCS0 or PCS1. In PSGMII
-+ * mode, it selects PCS0; otherwise, it selects PCS1.
-+ */
-+ if (port == 5) {
-+ val = interface == PHY_INTERFACE_MODE_PSGMII ?
-+ 0 : PPE_PORT5_SEL_PCS1;
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ PPE_PORT_MUX_CTRL_ADDR,
-+ PPE_PORT5_SEL_PCS1, val);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "%s: port5 select PCS fail %d\n",
-+ __func__, ret);
-+ return NULL;
-+ }
-+ }
-+
-+ return ppe_port->pcs;
-+}
-+
-+static const struct phylink_mac_ops ppe_phylink_ops = {
-+ .mac_config = ppe_port_mac_config,
-+ .mac_link_up = ppe_port_mac_link_up,
-+ .mac_link_down = ppe_port_mac_link_down,
-+ .mac_select_pcs = ppe_port_mac_select_pcs,
-+};
-+
-+/**
-+ * ppe_port_phylink_setup() - Set phylink instance for the given PPE port
-+ * @ppe_port: PPE port
-+ * @netdev: Netdevice
-+ *
-+ * Description: Wrapper function to help setup phylink for the PPE port
-+ * specified by @ppe_port and associated with the net device @netdev.
-+ *
-+ * Return: 0 upon success or a negative error upon failure.
-+ */
-+int ppe_port_phylink_setup(struct ppe_port *ppe_port, struct net_device *netdev)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ struct device_node *pcs_node;
-+ int ret;
-+
-+ /* Create PCS */
-+ pcs_node = of_parse_phandle(ppe_port->np, "pcs-handle", 0);
-+ if (!pcs_node)
-+ return -ENODEV;
-+
-+ ppe_port->pcs = ipq_unipcs_create(pcs_node);
-+ of_node_put(pcs_node);
-+ if (IS_ERR(ppe_port->pcs)) {
-+ dev_err(ppe_dev->dev, "%s: port %d failed to create PCS\n",
-+ __func__, ppe_port->port_id);
-+ return PTR_ERR(ppe_port->pcs);
-+ }
-+
-+ /* Port phylink capability */
-+ ppe_port->phylink_config.dev = &netdev->dev;
-+ ppe_port->phylink_config.type = PHYLINK_NETDEV;
-+ ppe_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
-+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 |
-+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
-+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_PSGMII,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_SGMII,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
-+ ppe_port->phylink_config.supported_interfaces);
-+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
-+ ppe_port->phylink_config.supported_interfaces);
-+
-+ /* Create phylink */
-+ ppe_port->phylink = phylink_create(&ppe_port->phylink_config,
-+ of_fwnode_handle(ppe_port->np),
-+ ppe_port->interface,
-+ &ppe_phylink_ops);
-+ if (IS_ERR(ppe_port->phylink)) {
-+ dev_err(ppe_dev->dev, "%s: port %d failed to create phylink\n",
-+ __func__, ppe_port->port_id);
-+ ret = PTR_ERR(ppe_port->phylink);
-+ goto err_free_pcs;
-+ }
-+
-+ /* Connect phylink */
-+ ret = phylink_of_phy_connect(ppe_port->phylink, ppe_port->np, 0);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "%s: port %d failed to connect phylink\n",
-+ __func__, ppe_port->port_id);
-+ goto err_free_phylink;
-+ }
-+
-+ return 0;
-+
-+err_free_phylink:
-+ phylink_destroy(ppe_port->phylink);
-+ ppe_port->phylink = NULL;
-+err_free_pcs:
-+ ipq_unipcs_destroy(ppe_port->pcs);
-+ ppe_port->pcs = NULL;
-+ return ret;
-+}
-+
-+/**
-+ * ppe_port_phylink_destroy() - Destroy phylink instance for the given PPE port
-+ * @ppe_port: PPE port
-+ *
-+ * Description: Wrapper function to help destroy phylink for the PPE port
-+ * specified by @ppe_port.
-+ */
-+void ppe_port_phylink_destroy(struct ppe_port *ppe_port)
-+{
-+ /* Destroy phylink */
-+ if (ppe_port->phylink) {
-+ rtnl_lock();
-+ phylink_disconnect_phy(ppe_port->phylink);
-+ rtnl_unlock();
-+ phylink_destroy(ppe_port->phylink);
-+ ppe_port->phylink = NULL;
-+ }
-+
-+ /* Destroy PCS */
-+ if (ppe_port->pcs) {
-+ ipq_unipcs_destroy(ppe_port->pcs);
-+ ppe_port->pcs = NULL;
-+ }
-+}
-+
-+/* PPE port clock initialization */
-+static int ppe_port_clock_init(struct ppe_port *ppe_port)
-+{
-+ struct device_node *port_node = ppe_port->np;
-+ struct reset_control *rstc;
-+ struct clk *clk;
-+ int i, j, ret;
-+
-+ for (i = 0; i < PPE_PORT_CLK_RST_MAX; i++) {
-+ /* Get PPE port resets which will be used to reset PPE
-+ * port and MAC.
-+ */
-+ rstc = of_reset_control_get_exclusive(port_node,
-+ ppe_port_clk_rst_name[i]);
-+ if (IS_ERR(rstc)) {
-+ ret = PTR_ERR(rstc);
-+ goto err_rst;
-+ }
-+
-+ clk = of_clk_get_by_name(port_node, ppe_port_clk_rst_name[i]);
-+ if (IS_ERR(clk)) {
-+ ret = PTR_ERR(clk);
-+ goto err_clk_get;
-+ }
-+
-+ ret = clk_prepare_enable(clk);
-+ if (ret)
-+ goto err_clk_en;
-+
-+ ppe_port->clks[i] = clk;
-+ ppe_port->rstcs[i] = rstc;
-+ }
-+
-+ return 0;
-+
-+err_clk_en:
-+ clk_put(clk);
-+err_clk_get:
-+ reset_control_put(rstc);
-+err_rst:
-+ for (j = 0; j < i; j++) {
-+ clk_disable_unprepare(ppe_port->clks[j]);
-+ clk_put(ppe_port->clks[j]);
-+ reset_control_put(ppe_port->rstcs[j]);
-+ }
-+
-+ return ret;
-+}
-+
-+/* PPE port clock deinitialization */
-+static void ppe_port_clock_deinit(struct ppe_port *ppe_port)
-+{
-+ int i;
-+
-+ for (i = 0; i < PPE_PORT_CLK_RST_MAX; i++) {
-+ clk_disable_unprepare(ppe_port->clks[i]);
-+ clk_put(ppe_port->clks[i]);
-+ reset_control_put(ppe_port->rstcs[i]);
-+ }
-+}
-+
-+/* PPE port MAC hardware init configuration */
-+static int ppe_port_mac_hw_init(struct ppe_port *ppe_port)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int ret, port = ppe_port->port_id;
-+ u32 reg, val;
-+
-+ /* GMAC RX and TX are initialized as disabled */
-+ reg = PPE_PORT_GMAC_ADDR(port);
-+ ret = regmap_update_bits(ppe_dev->regmap,
-+ reg + GMAC_ENABLE_ADDR, GMAC_TRXEN, 0);
-+ if (ret)
-+ return ret;
-+
-+ /* GMAC max frame size configuration */
-+ val = FIELD_PREP(GMAC_JUMBO_SIZE_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_JUMBO_SIZE_ADDR,
-+ GMAC_JUMBO_SIZE_M, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(GMAC_MAXFRAME_SIZE_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
-+ val |= FIELD_PREP(GMAC_TX_THD_M, 0x1);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_CTRL_ADDR,
-+ GMAC_CTRL_MASK, val);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(GMAC_HIGH_IPG_M, 0xc);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_DBG_CTRL_ADDR,
-+ GMAC_HIGH_IPG_M, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Enable and reset GMAC MIB counters and set as read clear
-+ * mode, the GMAC MIB counters will be cleared after reading.
-+ */
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_MIB_CTRL_ADDR,
-+ GMAC_MIB_CTRL_MASK, GMAC_MIB_CTRL_MASK);
-+ if (ret)
-+ return ret;
-+
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_MIB_CTRL_ADDR,
-+ GMAC_MIB_RST, 0);
-+ if (ret)
-+ return ret;
-+
-+ /* XGMAC RX and TX disabled and max frame size configuration */
-+ reg = PPE_PORT_XGMAC_ADDR(port);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_CONFIG_ADDR,
-+ XGMAC_TXEN | XGMAC_JD, XGMAC_JD);
-+ if (ret)
-+ return ret;
-+
-+ val = FIELD_PREP(XGMAC_GPSL_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
-+ val |= XGMAC_GPSLEN;
-+ val |= XGMAC_CST;
-+ val |= XGMAC_ACS;
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_CONFIG_ADDR,
-+ XGMAC_RX_CONFIG_MASK, val);
-+ if (ret)
-+ return ret;
-+
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_WD_TIMEOUT_ADDR,
-+ XGMAC_WD_TIMEOUT_MASK, XGMAC_WD_TIMEOUT_VAL);
-+ if (ret)
-+ return ret;
-+
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_PKT_FILTER_ADDR,
-+ XGMAC_PKT_FILTER_MASK, XGMAC_PKT_FILTER_VAL);
-+ if (ret)
-+ return ret;
-+
-+ /* Enable and reset XGMAC MIB counters */
-+ ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_MMC_CTRL_ADDR,
-+ XGMAC_MCF | XGMAC_CNTRST, XGMAC_CNTRST);
-+
-+ return ret;
-+}
-+
-+/**
-+ * ppe_port_mac_init() - Initialization of PPE ports for the PPE device
-+ * @ppe_dev: PPE device
-+ *
-+ * Description: Initialize the PPE MAC ports on the PPE device specified
-+ * by @ppe_dev.
-+ *
-+ * Return: 0 upon success or a negative error upon failure.
-+ */
-+int ppe_port_mac_init(struct ppe_device *ppe_dev)
-+{
-+ struct device_node *ports_node, *port_node;
-+ int port, num, ret, j, i = 0;
-+ struct ppe_ports *ppe_ports;
-+ phy_interface_t phy_mode;
-+
-+ ports_node = of_get_child_by_name(ppe_dev->dev->of_node,
-+ "ethernet-ports");
-+ if (!ports_node) {
-+ dev_err(ppe_dev->dev, "Failed to get ports node\n");
-+ return -ENODEV;
-+ }
-+
-+ num = of_get_available_child_count(ports_node);
-+
-+ ppe_ports = devm_kzalloc(ppe_dev->dev,
-+ struct_size(ppe_ports, port, num),
-+ GFP_KERNEL);
-+ if (!ppe_ports) {
-+ ret = -ENOMEM;
-+ goto err_ports_node;
-+ }
-+
-+ ppe_dev->ports = ppe_ports;
-+ ppe_ports->num = num;
-+
-+ for_each_available_child_of_node(ports_node, port_node) {
-+ ret = of_property_read_u32(port_node, "reg", &port);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Failed to get port id\n");
-+ goto err_port_node;
-+ }
-+
-+ ret = of_get_phy_mode(port_node, &phy_mode);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Failed to get phy mode\n");
-+ goto err_port_node;
-+ }
-+
-+ ppe_ports->port[i].ppe_dev = ppe_dev;
-+ ppe_ports->port[i].port_id = port;
-+ ppe_ports->port[i].np = port_node;
-+ ppe_ports->port[i].interface = phy_mode;
-+
-+ ret = ppe_port_clock_init(&ppe_ports->port[i]);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Failed to initialize port clocks\n");
-+ goto err_port_clk;
-+ }
-+
-+ ret = ppe_port_mac_hw_init(&ppe_ports->port[i]);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Failed to initialize MAC hardware\n");
-+ goto err_port_node;
-+ }
-+
-+ i++;
-+ }
-+
-+ of_node_put(ports_node);
-+ return 0;
-+
-+err_port_clk:
-+ for (j = 0; j < i; j++)
-+ ppe_port_clock_deinit(&ppe_ports->port[j]);
-+err_port_node:
-+ of_node_put(port_node);
-+err_ports_node:
-+ of_node_put(ports_node);
-+ return ret;
-+}
-+
-+/**
-+ * ppe_port_mac_deinit() - Deinitialization of PPE ports for the PPE device
-+ * @ppe_dev: PPE device
-+ *
-+ * Description: Deinitialize the PPE MAC ports on the PPE device specified
-+ * by @ppe_dev.
-+ */
-+void ppe_port_mac_deinit(struct ppe_device *ppe_dev)
-+{
-+ struct ppe_port *ppe_port;
-+ int i;
-+
-+ for (i = 0; i < ppe_dev->ports->num; i++) {
-+ ppe_port = &ppe_dev->ports->port[i];
-+ ppe_port_clock_deinit(ppe_port);
-+ }
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-@@ -0,0 +1,76 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __PPE_PORT_H__
-+#define __PPE_PORT_H__
-+
-+#include <linux/phylink.h>
-+
-+/**
-+ * enum ppe_port_clk_rst_type - PPE port clock and reset ID type
-+ * @PPE_PORT_CLK_RST_MAC: The clock and reset ID for port MAC
-+ * @PPE_PORT_CLK_RST_RX: The clock and reset ID for port receive path
-+ * @PPE_PORT_CLK_RST_TX: The clock and reset for port transmit path
-+ * @PPE_PORT_CLK_RST_MAX: The maximum of port clock and reset
-+ */
-+enum ppe_port_clk_rst_type {
-+ PPE_PORT_CLK_RST_MAC,
-+ PPE_PORT_CLK_RST_RX,
-+ PPE_PORT_CLK_RST_TX,
-+ PPE_PORT_CLK_RST_MAX,
-+};
-+
-+/**
-+ * enum ppe_mac_type - PPE MAC type
-+ * @PPE_MAC_TYPE_GMAC: GMAC type
-+ * @PPE_MAC_TYPE_XGMAC: XGMAC type
-+ */
-+enum ppe_mac_type {
-+ PPE_MAC_TYPE_GMAC,
-+ PPE_MAC_TYPE_XGMAC,
-+};
-+
-+/**
-+ * struct ppe_port - Private data for each PPE port
-+ * @phylink: Linux phylink instance
-+ * @phylink_config: Linux phylink configurations
-+ * @pcs: Linux phylink PCS instance
-+ * @np: Port device tree node
-+ * @ppe_dev: Back pointer to PPE device private data
-+ * @interface: Port interface mode
-+ * @mac_type: Port MAC type, GMAC or XGMAC
-+ * @port_id: Port ID
-+ * @clks: Port clocks
-+ * @rstcs: Port resets
-+ */
-+struct ppe_port {
-+ struct phylink *phylink;
-+ struct phylink_config phylink_config;
-+ struct phylink_pcs *pcs;
-+ struct device_node *np;
-+ struct ppe_device *ppe_dev;
-+ phy_interface_t interface;
-+ enum ppe_mac_type mac_type;
-+ int port_id;
-+ struct clk *clks[PPE_PORT_CLK_RST_MAX];
-+ struct reset_control *rstcs[PPE_PORT_CLK_RST_MAX];
-+};
-+
-+/**
-+ * struct ppe_ports - Array of PPE ports
-+ * @num: Number of PPE ports
-+ * @port: Each PPE port private data
-+ */
-+struct ppe_ports {
-+ unsigned int num;
-+ struct ppe_port port[] __counted_by(num);
-+};
-+
-+int ppe_port_mac_init(struct ppe_device *ppe_dev);
-+void ppe_port_mac_deinit(struct ppe_device *ppe_dev);
-+int ppe_port_phylink_setup(struct ppe_port *ppe_port,
-+ struct net_device *netdev);
-+void ppe_port_phylink_destroy(struct ppe_port *ppe_port);
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -7,6 +7,17 @@
- #ifndef __PPE_REGS_H__
- #define __PPE_REGS_H__
-
-+/* PPE port mux select control register */
-+#define PPE_PORT_MUX_CTRL_ADDR 0x10
-+#define PPE_PORT6_SEL_XGMAC BIT(13)
-+#define PPE_PORT5_SEL_XGMAC BIT(12)
-+#define PPE_PORT4_SEL_XGMAC BIT(11)
-+#define PPE_PORT3_SEL_XGMAC BIT(10)
-+#define PPE_PORT2_SEL_XGMAC BIT(9)
-+#define PPE_PORT1_SEL_XGMAC BIT(8)
-+#define PPE_PORT5_SEL_PCS1 BIT(4)
-+#define PPE_PORT_SEL_XGMAC(x) (BIT(8) << ((x) - 1))
-+
- /* There are 15 BM ports and 4 BM groups supported by PPE,
- * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
- * to PPE physical port 1-6, BM port 14 is matched to EIP.
-@@ -545,4 +556,116 @@
- #define PPE_ENQ_OPR_TBL_INC 0x10
- #define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
-
-+/* PPE GMAC and XGMAC register base address */
-+#define PPE_PORT_GMAC_ADDR(x) (0x001000 + ((x) - 1) * 0x200)
-+#define PPE_PORT_XGMAC_ADDR(x) (0x500000 + ((x) - 1) * 0x4000)
-+
-+/* GMAC enable register */
-+#define GMAC_ENABLE_ADDR 0x0
-+#define GMAC_TXFCEN BIT(6)
-+#define GMAC_RXFCEN BIT(5)
-+#define GMAC_DUPLEX_FULL BIT(4)
-+#define GMAC_TXEN BIT(1)
-+#define GMAC_RXEN BIT(0)
-+
-+#define GMAC_TRXEN \
-+ (GMAC_TXEN | GMAC_RXEN)
-+#define GMAC_ENABLE_ALL \
-+ (GMAC_TXFCEN | GMAC_RXFCEN | GMAC_DUPLEX_FULL | GMAC_TXEN | GMAC_RXEN)
-+
-+/* GMAC speed register */
-+#define GMAC_SPEED_ADDR 0x4
-+#define GMAC_SPEED_M GENMASK(1, 0)
-+#define GMAC_SPEED_10 0
-+#define GMAC_SPEED_100 1
-+#define GMAC_SPEED_1000 2
-+
-+/* GMAC control register */
-+#define GMAC_CTRL_ADDR 0x18
-+#define GMAC_TX_THD_M GENMASK(27, 24)
-+#define GMAC_MAXFRAME_SIZE_M GENMASK(21, 8)
-+#define GMAC_CRS_SEL BIT(6)
-+
-+#define GMAC_CTRL_MASK \
-+ (GMAC_TX_THD_M | GMAC_MAXFRAME_SIZE_M | GMAC_CRS_SEL)
-+
-+/* GMAC debug control register */
-+#define GMAC_DBG_CTRL_ADDR 0x1c
-+#define GMAC_HIGH_IPG_M GENMASK(15, 8)
-+
-+/* GMAC jumbo size register */
-+#define GMAC_JUMBO_SIZE_ADDR 0x30
-+#define GMAC_JUMBO_SIZE_M GENMASK(13, 0)
-+
-+/* GMAC MIB control register */
-+#define GMAC_MIB_CTRL_ADDR 0x34
-+#define GMAC_MIB_RD_CLR BIT(2)
-+#define GMAC_MIB_RST BIT(1)
-+#define GMAC_MIB_EN BIT(0)
-+
-+#define GMAC_MIB_CTRL_MASK \
-+ (GMAC_MIB_RD_CLR | GMAC_MIB_RST | GMAC_MIB_EN)
-+
-+/* XGMAC TX configuration register */
-+#define XGMAC_TX_CONFIG_ADDR 0x0
-+#define XGMAC_SPEED_M GENMASK(31, 29)
-+#define XGMAC_SPEED_10000_USXGMII FIELD_PREP(XGMAC_SPEED_M, 4)
-+#define XGMAC_SPEED_10000 FIELD_PREP(XGMAC_SPEED_M, 0)
-+#define XGMAC_SPEED_5000 FIELD_PREP(XGMAC_SPEED_M, 5)
-+#define XGMAC_SPEED_2500_USXGMII FIELD_PREP(XGMAC_SPEED_M, 6)
-+#define XGMAC_SPEED_2500 FIELD_PREP(XGMAC_SPEED_M, 2)
-+#define XGMAC_SPEED_1000 FIELD_PREP(XGMAC_SPEED_M, 3)
-+#define XGMAC_SPEED_100 XGMAC_SPEED_1000
-+#define XGMAC_SPEED_10 XGMAC_SPEED_1000
-+#define XGMAC_JD BIT(16)
-+#define XGMAC_TXEN BIT(0)
-+
-+/* XGMAC RX configuration register */
-+#define XGMAC_RX_CONFIG_ADDR 0x4
-+#define XGMAC_GPSL_M GENMASK(29, 16)
-+#define XGMAC_WD BIT(7)
-+#define XGMAC_GPSLEN BIT(6)
-+#define XGMAC_CST BIT(2)
-+#define XGMAC_ACS BIT(1)
-+#define XGMAC_RXEN BIT(0)
-+
-+#define XGMAC_RX_CONFIG_MASK \
-+ (XGMAC_GPSL_M | XGMAC_WD | XGMAC_GPSLEN | XGMAC_CST | \
-+ XGMAC_ACS | XGMAC_RXEN)
-+
-+/* XGMAC packet filter register */
-+#define XGMAC_PKT_FILTER_ADDR 0x8
-+#define XGMAC_RA BIT(31)
-+#define XGMAC_PCF_M GENMASK(7, 6)
-+#define XGMAC_PR BIT(0)
-+
-+#define XGMAC_PKT_FILTER_MASK \
-+ (XGMAC_RA | XGMAC_PCF_M | XGMAC_PR)
-+#define XGMAC_PKT_FILTER_VAL \
-+ (XGMAC_RA | XGMAC_PR | FIELD_PREP(XGMAC_PCF_M, 0x2))
-+
-+/* XGMAC watchdog timeout register */
-+#define XGMAC_WD_TIMEOUT_ADDR 0xc
-+#define XGMAC_PWE BIT(8)
-+#define XGMAC_WTO_M GENMASK(3, 0)
-+
-+#define XGMAC_WD_TIMEOUT_MASK \
-+ (XGMAC_PWE | XGMAC_WTO_M)
-+#define XGMAC_WD_TIMEOUT_VAL \
-+ (XGMAC_PWE | FIELD_PREP(XGMAC_WTO_M, 0xb))
-+
-+/* XGMAC TX flow control register */
-+#define XGMAC_TX_FLOW_CTRL_ADDR 0x70
-+#define XGMAC_PAUSE_TIME_M GENMASK(31, 16)
-+#define XGMAC_TXFCEN BIT(1)
-+
-+/* XGMAC RX flow control register */
-+#define XGMAC_RX_FLOW_CTRL_ADDR 0x90
-+#define XGMAC_RXFCEN BIT(0)
-+
-+/* XGMAC management counters control register */
-+#define XGMAC_MMC_CTRL_ADDR 0x800
-+#define XGMAC_MCF BIT(3)
-+#define XGMAC_CNTRST BIT(0)
-+
- #endif
+++ /dev/null
-From 3e8cb061bff0bf74503cd2f206ed5c599a1e7ff7 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Thu, 29 Feb 2024 20:16:14 +0800
-Subject: [PATCH 33/50] net: ethernet: qualcomm: Add PPE port MAC MIB
- statistics functions
-
-Add PPE port MAC MIB statistics functions which are used by netdev
-ops and ethtool. For GMAC, a polling task is scheduled to read the
-MIB counters periodically to avoid 32bit register counter overflow.
-
-Change-Id: Ic20e240061278f77d703f652e1f7d959db8fac37
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 465 +++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 13 +
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 91 ++++
- 3 files changed, 569 insertions(+)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-@@ -23,6 +23,122 @@
- /* PPE BM port start for PPE MAC ports */
- #define PPE_BM_PORT_MAC_START 7
-
-+/* Poll interval time to poll GMAC MIBs for overflow protection,
-+ * the time should ensure that the 32bit GMAC packet counter
-+ * register would not overflow within this time at line rate
-+ * speed for 64B packet size.
-+ */
-+#define PPE_GMIB_POLL_INTERVAL_MS 120000
-+
-+#define PPE_MAC_MIB_DESC(_s, _o, _n) \
-+ { \
-+ .size = (_s), \
-+ .offset = (_o), \
-+ .name = (_n), \
-+ }
-+
-+/* PPE MAC MIB description */
-+struct ppe_mac_mib_info {
-+ u32 size;
-+ u32 offset;
-+ const char *name;
-+};
-+
-+/* PPE GMAC MIB statistics type */
-+enum ppe_gmib_stats_type {
-+ gmib_rx_broadcast,
-+ gmib_rx_pause,
-+ gmib_rx_multicast,
-+ gmib_rx_fcserr,
-+ gmib_rx_alignerr,
-+ gmib_rx_runt,
-+ gmib_rx_frag,
-+ gmib_rx_jumbofcserr,
-+ gmib_rx_jumboalignerr,
-+ gmib_rx_pkt64,
-+ gmib_rx_pkt65to127,
-+ gmib_rx_pkt128to255,
-+ gmib_rx_pkt256to511,
-+ gmib_rx_pkt512to1023,
-+ gmib_rx_pkt1024to1518,
-+ gmib_rx_pkt1519tomax,
-+ gmib_rx_toolong,
-+ gmib_rx_bytes_g,
-+ gmib_rx_bytes_b,
-+ gmib_rx_unicast,
-+ gmib_tx_broadcast,
-+ gmib_tx_pause,
-+ gmib_tx_multicast,
-+ gmib_tx_underrun,
-+ gmib_tx_pkt64,
-+ gmib_tx_pkt65to127,
-+ gmib_tx_pkt128to255,
-+ gmib_tx_pkt256to511,
-+ gmib_tx_pkt512to1023,
-+ gmib_tx_pkt1024to1518,
-+ gmib_tx_pkt1519tomax,
-+ gmib_tx_bytes,
-+ gmib_tx_collisions,
-+ gmib_tx_abortcol,
-+ gmib_tx_multicol,
-+ gmib_tx_singlecol,
-+ gmib_tx_excdeffer,
-+ gmib_tx_deffer,
-+ gmib_tx_latecol,
-+ gmib_tx_unicast,
-+};
-+
-+/* PPE XGMAC MIB statistics type */
-+enum ppe_xgmib_stats_type {
-+ xgmib_tx_bytes,
-+ xgmib_tx_frames,
-+ xgmib_tx_broadcast_g,
-+ xgmib_tx_multicast_g,
-+ xgmib_tx_pkt64,
-+ xgmib_tx_pkt65to127,
-+ xgmib_tx_pkt128to255,
-+ xgmib_tx_pkt256to511,
-+ xgmib_tx_pkt512to1023,
-+ xgmib_tx_pkt1024tomax,
-+ xgmib_tx_unicast,
-+ xgmib_tx_multicast,
-+ xgmib_tx_broadcast,
-+ xgmib_tx_underflow_err,
-+ xgmib_tx_bytes_g,
-+ xgmib_tx_frames_g,
-+ xgmib_tx_pause,
-+ xgmib_tx_vlan_g,
-+ xgmib_tx_lpi_usec,
-+ xgmib_tx_lpi_tran,
-+ xgmib_rx_frames,
-+ xgmib_rx_bytes,
-+ xgmib_rx_bytes_g,
-+ xgmib_rx_broadcast_g,
-+ xgmib_rx_multicast_g,
-+ xgmib_rx_crc_err,
-+ xgmib_rx_runt_err,
-+ xgmib_rx_jabber_err,
-+ xgmib_rx_undersize_g,
-+ xgmib_rx_oversize_g,
-+ xgmib_rx_pkt64,
-+ xgmib_rx_pkt65to127,
-+ xgmib_rx_pkt128to255,
-+ xgmib_rx_pkt256to511,
-+ xgmib_rx_pkt512to1023,
-+ xgmib_rx_pkt1024tomax,
-+ xgmib_rx_unicast_g,
-+ xgmib_rx_len_err,
-+ xgmib_rx_outofrange_err,
-+ xgmib_rx_pause,
-+ xgmib_rx_fifo_overflow,
-+ xgmib_rx_vlan,
-+ xgmib_rx_wdog_err,
-+ xgmib_rx_lpi_usec,
-+ xgmib_rx_lpi_tran,
-+ xgmib_rx_drop_frames,
-+ xgmib_rx_drop_bytes,
-+};
-+
- /* PPE port clock and reset name */
- static const char * const ppe_port_clk_rst_name[] = {
- [PPE_PORT_CLK_RST_MAC] = "port_mac",
-@@ -30,6 +146,322 @@ static const char * const ppe_port_clk_r
- [PPE_PORT_CLK_RST_TX] = "port_tx",
- };
-
-+/* PPE GMAC MIB statistics description information */
-+static const struct ppe_mac_mib_info gmib_info[] = {
-+ PPE_MAC_MIB_DESC(4, GMAC_RXBROAD_ADDR, "rx_broadcast"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPAUSE_ADDR, "rx_pause"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXMULTI_ADDR, "rx_multicast"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXFCSERR_ADDR, "rx_fcserr"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXALIGNERR_ADDR, "rx_alignerr"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXRUNT_ADDR, "rx_runt"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXFRAG_ADDR, "rx_frag"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOFCSERR_ADDR, "rx_jumbofcserr"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOALIGNERR_ADDR, "rx_jumboalignerr"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT64_ADDR, "rx_pkt64"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT65TO127_ADDR, "rx_pkt65to127"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT128TO255_ADDR, "rx_pkt128to255"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT256TO511_ADDR, "rx_pkt256to511"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT512TO1023_ADDR, "rx_pkt512to1023"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT1024TO1518_ADDR, "rx_pkt1024to1518"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT1519TOX_ADDR, "rx_pkt1519tomax"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXTOOLONG_ADDR, "rx_toolong"),
-+ PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
-+ PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_B_ADDR, "rx_bytes_b"),
-+ PPE_MAC_MIB_DESC(4, GMAC_RXUNI_ADDR, "rx_unicast"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXBROAD_ADDR, "tx_broadcast"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPAUSE_ADDR, "tx_pause"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXMULTI_ADDR, "tx_multicast"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXUNDERRUN_ADDR, "tx_underrun"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT64_ADDR, "tx_pkt64"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT65TO127_ADDR, "tx_pkt65to127"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT128TO255_ADDR, "tx_pkt128to255"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT256TO511_ADDR, "tx_pkt256to511"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT512TO1023_ADDR, "tx_pkt512to1023"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT1024TO1518_ADDR, "tx_pkt1024to1518"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT1519TOX_ADDR, "tx_pkt1519tomax"),
-+ PPE_MAC_MIB_DESC(8, GMAC_TXBYTE_ADDR, "tx_bytes"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXCOLLISIONS_ADDR, "tx_collisions"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXABORTCOL_ADDR, "tx_abortcol"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXMULTICOL_ADDR, "tx_multicol"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXSINGLECOL_ADDR, "tx_singlecol"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXEXCESSIVEDEFER_ADDR, "tx_excdeffer"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXDEFER_ADDR, "tx_deffer"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXLATECOL_ADDR, "tx_latecol"),
-+ PPE_MAC_MIB_DESC(4, GMAC_TXUNI_ADDR, "tx_unicast"),
-+};
-+
-+/* PPE XGMAC MIB statistics description information */
-+static const struct ppe_mac_mib_info xgmib_info[] = {
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_GB_ADDR, "tx_bytes"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_GB_ADDR, "tx_frames"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_G_ADDR, "tx_broadcast_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_G_ADDR, "tx_multicast_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT64_GB_ADDR, "tx_pkt64"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT65TO127_GB_ADDR, "tx_pkt65to127"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT128TO255_GB_ADDR, "tx_pkt128to255"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT256TO511_GB_ADDR, "tx_pkt256to511"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT512TO1023_GB_ADDR, "tx_pkt512to1023"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT1024TOMAX_GB_ADDR, "tx_pkt1024tomax"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXUNI_GB_ADDR, "tx_unicast"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_GB_ADDR, "tx_multicast"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_GB_ADDR, "tx_broadcast"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXUNDERFLOW_ERR_ADDR, "tx_underflow_err"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_G_ADDR, "tx_bytes_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_G_ADDR, "tx_frames_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXPAUSE_ADDR, "tx_pause"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_TXVLAN_G_ADDR, "tx_vlan_g"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_USEC_ADDR, "tx_lpi_usec"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_TRAN_ADDR, "tx_lpi_tran"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT_GB_ADDR, "rx_frames"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_GB_ADDR, "rx_bytes"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXBROAD_G_ADDR, "rx_broadcast_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXMULTI_G_ADDR, "rx_multicast_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXCRC_ERR_ADDR, "rx_crc_err"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXRUNT_ERR_ADDR, "rx_runt_err"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXJABBER_ERR_ADDR, "rx_jabber_err"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXUNDERSIZE_G_ADDR, "rx_undersize_g"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXOVERSIZE_G_ADDR, "rx_oversize_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT64_GB_ADDR, "rx_pkt64"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT65TO127_GB_ADDR, "rx_pkt65to127"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT128TO255_GB_ADDR, "rx_pkt128to255"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT256TO511_GB_ADDR, "rx_pkt256to511"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT512TO1023_GB_ADDR, "rx_pkt512to1023"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT1024TOMAX_GB_ADDR, "rx_pkt1024tomax"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXUNI_G_ADDR, "rx_unicast_g"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXLEN_ERR_ADDR, "rx_len_err"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXOUTOFRANGE_ADDR, "rx_outofrange_err"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXPAUSE_ADDR, "rx_pause"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXFIFOOVERFLOW_ADDR, "rx_fifo_overflow"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXVLAN_GB_ADDR, "rx_vlan"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXWATCHDOG_ERR_ADDR, "rx_wdog_err"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_USEC_ADDR, "rx_lpi_usec"),
-+ PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_TRAN_ADDR, "rx_lpi_tran"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARD_GB_ADDR, "rx_drop_frames"),
-+ PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARDBYTE_GB_ADDR, "rx_drop_bytes"),
-+};
-+
-+/* Get GMAC MIBs from registers and accumulate to PPE port GMIB stats array */
-+static void ppe_port_gmib_update(struct ppe_port *ppe_port)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ const struct ppe_mac_mib_info *mib;
-+ int port = ppe_port->port_id;
-+ u32 reg, val;
-+ int i, ret;
-+
-+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++) {
-+ mib = &gmib_info[i];
-+ reg = PPE_PORT_GMAC_ADDR(port) + mib->offset;
-+
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
-+ continue;
-+ }
-+
-+ ppe_port->gmib_stats[i] += val;
-+ if (mib->size == 8) {
-+ ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
-+ if (ret) {
-+ dev_warn(ppe_dev->dev, "%s: %d\n",
-+ __func__, ret);
-+ continue;
-+ }
-+
-+ ppe_port->gmib_stats[i] += (u64)val << 32;
-+ }
-+ }
-+}
-+
-+/* Polling task to read GMIB statistics to avoid GMIB 32bit register overflow */
-+static void ppe_port_gmib_stats_poll(struct work_struct *work)
-+{
-+ struct ppe_port *ppe_port = container_of(work, struct ppe_port,
-+ gmib_read.work);
-+ spin_lock(&ppe_port->gmib_stats_lock);
-+ ppe_port_gmib_update(ppe_port);
-+ spin_unlock(&ppe_port->gmib_stats_lock);
-+
-+ schedule_delayed_work(&ppe_port->gmib_read,
-+ msecs_to_jiffies(PPE_GMIB_POLL_INTERVAL_MS));
-+}
-+
-+/* Get the XGMAC MIB counter based on the specific MIB stats type */
-+static u64 ppe_port_xgmib_get(struct ppe_port *ppe_port,
-+ enum ppe_xgmib_stats_type xgmib_type)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ const struct ppe_mac_mib_info *mib;
-+ int port = ppe_port->port_id;
-+ u32 reg, val;
-+ u64 data = 0;
-+ int ret;
-+
-+ mib = &xgmib_info[xgmib_type];
-+ reg = PPE_PORT_XGMAC_ADDR(port) + mib->offset;
-+
-+ ret = regmap_read(ppe_dev->regmap, reg, &val);
-+ if (ret) {
-+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
-+ goto data_return;
-+ }
-+
-+ data = val;
-+ if (mib->size == 8) {
-+ ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
-+ if (ret) {
-+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
-+ goto data_return;
-+ }
-+
-+ data |= (u64)val << 32;
-+ }
-+
-+data_return:
-+ return data;
-+}
-+
-+/**
-+ * ppe_port_get_sset_count() - Get PPE port statistics string count
-+ * @ppe_port: PPE port
-+ * @sset: string set ID
-+ *
-+ * Description: Get the MAC statistics string count for the PPE port
-+ * specified by @ppe_port.
-+ *
-+ * Return: The count of the statistics string.
-+ */
-+int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset)
-+{
-+ if (sset != ETH_SS_STATS)
-+ return 0;
-+
-+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC)
-+ return ARRAY_SIZE(gmib_info);
-+ else
-+ return ARRAY_SIZE(xgmib_info);
-+}
-+
-+/**
-+ * ppe_port_get_strings() - Get PPE port statistics strings
-+ * @ppe_port: PPE port
-+ * @stringset: string set ID
-+ * @data: pointer to statistics strings
-+ *
-+ * Description: Get the MAC statistics stings for the PPE port
-+ * specified by @ppe_port. The strings are stored in the buffer
-+ * indicated by @data which used in the ethtool ops.
-+ */
-+void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data)
-+{
-+ int i;
-+
-+ if (stringset != ETH_SS_STATS)
-+ return;
-+
-+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
-+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
-+ strscpy(data + i * ETH_GSTRING_LEN, gmib_info[i].name,
-+ ETH_GSTRING_LEN);
-+ } else {
-+ for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
-+ strscpy(data + i * ETH_GSTRING_LEN, xgmib_info[i].name,
-+ ETH_GSTRING_LEN);
-+ }
-+}
-+
-+/**
-+ * ppe_port_get_ethtool_stats() - Get PPE port ethtool statistics
-+ * @ppe_port: PPE port
-+ * @data: pointer to statistics data
-+ *
-+ * Description: Get the MAC statistics for the PPE port specified
-+ * by @ppe_port. The statistics are stored in the buffer indicated
-+ * by @data which used in the ethtool ops.
-+ */
-+void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data)
-+{
-+ int i;
-+
-+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
-+ spin_lock(&ppe_port->gmib_stats_lock);
-+
-+ ppe_port_gmib_update(ppe_port);
-+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
-+ data[i] = ppe_port->gmib_stats[i];
-+
-+ spin_unlock(&ppe_port->gmib_stats_lock);
-+ } else {
-+ for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
-+ data[i] = ppe_port_xgmib_get(ppe_port, i);
-+ }
-+}
-+
-+/**
-+ * ppe_port_get_stats64() - Get PPE port statistics
-+ * @ppe_port: PPE port
-+ * @s: statistics pointer
-+ *
-+ * Description: Get the MAC statistics for the PPE port specified
-+ * by @ppe_port.
-+ */
-+void ppe_port_get_stats64(struct ppe_port *ppe_port,
-+ struct rtnl_link_stats64 *s)
-+{
-+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
-+ u64 *src = ppe_port->gmib_stats;
-+
-+ spin_lock(&ppe_port->gmib_stats_lock);
-+
-+ ppe_port_gmib_update(ppe_port);
-+
-+ s->rx_packets = src[gmib_rx_unicast] +
-+ src[gmib_rx_broadcast] + src[gmib_rx_multicast];
-+
-+ s->tx_packets = src[gmib_tx_unicast] +
-+ src[gmib_tx_broadcast] + src[gmib_tx_multicast];
-+
-+ s->rx_bytes = src[gmib_rx_bytes_g];
-+ s->tx_bytes = src[gmib_tx_bytes];
-+ s->multicast = src[gmib_rx_multicast];
-+
-+ s->rx_crc_errors = src[gmib_rx_fcserr] + src[gmib_rx_frag];
-+ s->rx_frame_errors = src[gmib_rx_alignerr];
-+ s->rx_errors = s->rx_crc_errors + s->rx_frame_errors;
-+ s->rx_dropped = src[gmib_rx_toolong] + s->rx_errors;
-+
-+ s->tx_fifo_errors = src[gmib_tx_underrun];
-+ s->tx_aborted_errors = src[gmib_tx_abortcol];
-+ s->tx_errors = s->tx_fifo_errors + s->tx_aborted_errors;
-+ s->collisions = src[gmib_tx_collisions];
-+
-+ spin_unlock(&ppe_port->gmib_stats_lock);
-+ } else {
-+ s->multicast = ppe_port_xgmib_get(ppe_port, xgmib_rx_multicast_g);
-+
-+ s->rx_packets = s->multicast;
-+ s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_unicast_g);
-+ s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_broadcast_g);
-+
-+ s->tx_packets = ppe_port_xgmib_get(ppe_port, xgmib_tx_frames);
-+ s->rx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_rx_bytes);
-+ s->tx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_tx_bytes);
-+
-+ s->rx_crc_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_crc_err);
-+ s->rx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_fifo_overflow);
-+
-+ s->rx_length_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_len_err);
-+ s->rx_errors = s->rx_crc_errors +
-+ s->rx_fifo_errors + s->rx_length_errors;
-+ s->rx_dropped = s->rx_errors;
-+
-+ s->tx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_tx_underflow_err);
-+ s->tx_errors = s->tx_packets -
-+ ppe_port_xgmib_get(ppe_port, xgmib_tx_frames_g);
-+ }
-+}
-+
- /* PPE port and MAC reset */
- static int ppe_port_mac_reset(struct ppe_port *ppe_port)
- {
-@@ -261,6 +693,9 @@ static void ppe_port_mac_link_up(struct
- int ret, port = ppe_port->port_id;
- u32 reg, val;
-
-+ /* Start GMIB statistics polling */
-+ schedule_delayed_work(&ppe_port->gmib_read, 0);
-+
- if (mac_type == PPE_MAC_TYPE_GMAC)
- ret = ppe_port_gmac_link_up(ppe_port,
- speed, duplex, tx_pause, rx_pause);
-@@ -306,6 +741,9 @@ static void ppe_port_mac_link_down(struc
- int ret, port = ppe_port->port_id;
- u32 reg;
-
-+ /* Stop GMIB statistics polling */
-+ cancel_delayed_work_sync(&ppe_port->gmib_read);
-+
- /* Disable PPE port TX */
- reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
- ret = regmap_update_bits(ppe_dev->regmap, reg,
-@@ -627,6 +1065,27 @@ static int ppe_port_mac_hw_init(struct p
- return ret;
- }
-
-+/* PPE port MAC MIB work task initialization */
-+static int ppe_port_mac_mib_work_init(struct ppe_port *ppe_port)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ u64 *gstats;
-+
-+ gstats = devm_kzalloc(ppe_dev->dev,
-+ sizeof(*gstats) * ARRAY_SIZE(gmib_info),
-+ GFP_KERNEL);
-+ if (!gstats)
-+ return -ENOMEM;
-+
-+ ppe_port->gmib_stats = gstats;
-+
-+ spin_lock_init(&ppe_port->gmib_stats_lock);
-+ INIT_DELAYED_WORK(&ppe_port->gmib_read,
-+ ppe_port_gmib_stats_poll);
-+
-+ return 0;
-+}
-+
- /**
- * ppe_port_mac_init() - Initialization of PPE ports for the PPE device
- * @ppe_dev: PPE device
-@@ -693,6 +1152,12 @@ int ppe_port_mac_init(struct ppe_device
- goto err_port_node;
- }
-
-+ ret = ppe_port_mac_mib_work_init(&ppe_ports->port[i]);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Failed to initialize MAC MIB work\n");
-+ goto err_port_node;
-+ }
-+
- i++;
- }
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-@@ -8,6 +8,8 @@
-
- #include <linux/phylink.h>
-
-+struct rtnl_link_stats64;
-+
- /**
- * enum ppe_port_clk_rst_type - PPE port clock and reset ID type
- * @PPE_PORT_CLK_RST_MAC: The clock and reset ID for port MAC
-@@ -44,6 +46,9 @@ enum ppe_mac_type {
- * @port_id: Port ID
- * @clks: Port clocks
- * @rstcs: Port resets
-+ * @gmib_read: Delay work task for GMAC MIB statistics polling function
-+ * @gmib_stats: GMAC MIB statistics array
-+ * @gmib_stats_lock: Lock to protect GMAC MIB statistics
- */
- struct ppe_port {
- struct phylink *phylink;
-@@ -56,6 +61,9 @@ struct ppe_port {
- int port_id;
- struct clk *clks[PPE_PORT_CLK_RST_MAX];
- struct reset_control *rstcs[PPE_PORT_CLK_RST_MAX];
-+ struct delayed_work gmib_read;
-+ u64 *gmib_stats;
-+ spinlock_t gmib_stats_lock; /* Protects GMIB stats */
- };
-
- /**
-@@ -73,4 +81,9 @@ void ppe_port_mac_deinit(struct ppe_devi
- int ppe_port_phylink_setup(struct ppe_port *ppe_port,
- struct net_device *netdev);
- void ppe_port_phylink_destroy(struct ppe_port *ppe_port);
-+int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset);
-+void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data);
-+void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
-+void ppe_port_get_stats64(struct ppe_port *ppe_port,
-+ struct rtnl_link_stats64 *s);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -606,6 +606,48 @@
- #define GMAC_MIB_CTRL_MASK \
- (GMAC_MIB_RD_CLR | GMAC_MIB_RST | GMAC_MIB_EN)
-
-+/* GMAC MIB counter registers */
-+#define GMAC_RXBROAD_ADDR 0x40
-+#define GMAC_RXPAUSE_ADDR 0x44
-+#define GMAC_RXMULTI_ADDR 0x48
-+#define GMAC_RXFCSERR_ADDR 0x4C
-+#define GMAC_RXALIGNERR_ADDR 0x50
-+#define GMAC_RXRUNT_ADDR 0x54
-+#define GMAC_RXFRAG_ADDR 0x58
-+#define GMAC_RXJUMBOFCSERR_ADDR 0x5C
-+#define GMAC_RXJUMBOALIGNERR_ADDR 0x60
-+#define GMAC_RXPKT64_ADDR 0x64
-+#define GMAC_RXPKT65TO127_ADDR 0x68
-+#define GMAC_RXPKT128TO255_ADDR 0x6C
-+#define GMAC_RXPKT256TO511_ADDR 0x70
-+#define GMAC_RXPKT512TO1023_ADDR 0x74
-+#define GMAC_RXPKT1024TO1518_ADDR 0x78
-+#define GMAC_RXPKT1519TOX_ADDR 0x7C
-+#define GMAC_RXTOOLONG_ADDR 0x80
-+#define GMAC_RXBYTE_G_ADDR 0x84
-+#define GMAC_RXBYTE_B_ADDR 0x8C
-+#define GMAC_RXUNI_ADDR 0x94
-+#define GMAC_TXBROAD_ADDR 0xA0
-+#define GMAC_TXPAUSE_ADDR 0xA4
-+#define GMAC_TXMULTI_ADDR 0xA8
-+#define GMAC_TXUNDERRUN_ADDR 0xAC
-+#define GMAC_TXPKT64_ADDR 0xB0
-+#define GMAC_TXPKT65TO127_ADDR 0xB4
-+#define GMAC_TXPKT128TO255_ADDR 0xB8
-+#define GMAC_TXPKT256TO511_ADDR 0xBC
-+#define GMAC_TXPKT512TO1023_ADDR 0xC0
-+#define GMAC_TXPKT1024TO1518_ADDR 0xC4
-+#define GMAC_TXPKT1519TOX_ADDR 0xC8
-+#define GMAC_TXBYTE_ADDR 0xCC
-+#define GMAC_TXCOLLISIONS_ADDR 0xD4
-+#define GMAC_TXABORTCOL_ADDR 0xD8
-+#define GMAC_TXMULTICOL_ADDR 0xDC
-+#define GMAC_TXSINGLECOL_ADDR 0xE0
-+#define GMAC_TXEXCESSIVEDEFER_ADDR 0xE4
-+#define GMAC_TXDEFER_ADDR 0xE8
-+#define GMAC_TXLATECOL_ADDR 0xEC
-+#define GMAC_TXUNI_ADDR 0xF0
-+
- /* XGMAC TX configuration register */
- #define XGMAC_TX_CONFIG_ADDR 0x0
- #define XGMAC_SPEED_M GENMASK(31, 29)
-@@ -668,4 +710,53 @@
- #define XGMAC_MCF BIT(3)
- #define XGMAC_CNTRST BIT(0)
-
-+/* XGMAC MIB counter registers */
-+#define XGMAC_TXBYTE_GB_ADDR 0x814
-+#define XGMAC_TXPKT_GB_ADDR 0x81C
-+#define XGMAC_TXBROAD_G_ADDR 0x824
-+#define XGMAC_TXMULTI_G_ADDR 0x82C
-+#define XGMAC_TXPKT64_GB_ADDR 0x834
-+#define XGMAC_TXPKT65TO127_GB_ADDR 0x83C
-+#define XGMAC_TXPKT128TO255_GB_ADDR 0x844
-+#define XGMAC_TXPKT256TO511_GB_ADDR 0x84C
-+#define XGMAC_TXPKT512TO1023_GB_ADDR 0x854
-+#define XGMAC_TXPKT1024TOMAX_GB_ADDR 0x85C
-+#define XGMAC_TXUNI_GB_ADDR 0x864
-+#define XGMAC_TXMULTI_GB_ADDR 0x86C
-+#define XGMAC_TXBROAD_GB_ADDR 0x874
-+#define XGMAC_TXUNDERFLOW_ERR_ADDR 0x87C
-+#define XGMAC_TXBYTE_G_ADDR 0x884
-+#define XGMAC_TXPKT_G_ADDR 0x88C
-+#define XGMAC_TXPAUSE_ADDR 0x894
-+#define XGMAC_TXVLAN_G_ADDR 0x89C
-+#define XGMAC_TXLPI_USEC_ADDR 0x8A4
-+#define XGMAC_TXLPI_TRAN_ADDR 0x8A8
-+#define XGMAC_RXPKT_GB_ADDR 0x900
-+#define XGMAC_RXBYTE_GB_ADDR 0x908
-+#define XGMAC_RXBYTE_G_ADDR 0x910
-+#define XGMAC_RXBROAD_G_ADDR 0x918
-+#define XGMAC_RXMULTI_G_ADDR 0x920
-+#define XGMAC_RXCRC_ERR_ADDR 0x928
-+#define XGMAC_RXRUNT_ERR_ADDR 0x930
-+#define XGMAC_RXJABBER_ERR_ADDR 0x934
-+#define XGMAC_RXUNDERSIZE_G_ADDR 0x938
-+#define XGMAC_RXOVERSIZE_G_ADDR 0x93C
-+#define XGMAC_RXPKT64_GB_ADDR 0x940
-+#define XGMAC_RXPKT65TO127_GB_ADDR 0x948
-+#define XGMAC_RXPKT128TO255_GB_ADDR 0x950
-+#define XGMAC_RXPKT256TO511_GB_ADDR 0x958
-+#define XGMAC_RXPKT512TO1023_GB_ADDR 0x960
-+#define XGMAC_RXPKT1024TOMAX_GB_ADDR 0x968
-+#define XGMAC_RXUNI_G_ADDR 0x970
-+#define XGMAC_RXLEN_ERR_ADDR 0x978
-+#define XGMAC_RXOUTOFRANGE_ADDR 0x980
-+#define XGMAC_RXPAUSE_ADDR 0x988
-+#define XGMAC_RXFIFOOVERFLOW_ADDR 0x990
-+#define XGMAC_RXVLAN_GB_ADDR 0x998
-+#define XGMAC_RXWATCHDOG_ERR_ADDR 0x9A0
-+#define XGMAC_RXLPI_USEC_ADDR 0x9A4
-+#define XGMAC_RXLPI_TRAN_ADDR 0x9A8
-+#define XGMAC_RXDISCARD_GB_ADDR 0x9AC
-+#define XGMAC_RXDISCARDBYTE_GB_ADDR 0x9B4
-+
- #endif
+++ /dev/null
-From 172dc9a0d7704051c63407af6b39939c43801a99 Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Fri, 1 Mar 2024 13:36:26 +0800
-Subject: [PATCH 34/50] net: ethernet: qualcomm: Add PPE port MAC address and
- EEE functions
-
-Add PPE port MAC address set and EEE set API functions which
-will be used by netdev ops and ethtool.
-
-Change-Id: Id2b3b06ae940b3b6f5227d927316329cdf3caeaa
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 75 ++++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 3 +
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 29 ++++++++
- 3 files changed, 107 insertions(+)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-@@ -462,6 +462,81 @@ void ppe_port_get_stats64(struct ppe_por
- }
- }
-
-+/**
-+ * ppe_port_set_mac_address() - Set PPE port MAC address
-+ * @ppe_port: PPE port
-+ * @addr: MAC address
-+ *
-+ * Description: Set MAC address for the given PPE port.
-+ *
-+ * Return: 0 upon success or a negative error upon failure.
-+ */
-+int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int port = ppe_port->port_id;
-+ u32 reg, val;
-+ int ret;
-+
-+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
-+ reg = PPE_PORT_GMAC_ADDR(port);
-+ val = (addr[5] << 8) | addr[4];
-+ ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR0_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ val = (addr[0] << 24) | (addr[1] << 16) |
-+ (addr[2] << 8) | addr[3];
-+ ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR1_ADDR, val);
-+ if (ret)
-+ return ret;
-+ } else {
-+ reg = PPE_PORT_XGMAC_ADDR(port);
-+ val = (addr[5] << 8) | addr[4] | XGMAC_ADDR_EN;
-+ ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_H_ADDR, val);
-+ if (ret)
-+ return ret;
-+
-+ val = (addr[3] << 24) | (addr[2] << 16) |
-+ (addr[1] << 8) | addr[0];
-+ ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_L_ADDR, val);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * ppe_port_set_mac_eee() - Set EEE configuration for PPE port MAC
-+ * @ppe_port: PPE port
-+ * @eee: EEE settings
-+ *
-+ * Description: Set port MAC EEE settings for the given PPE port.
-+ *
-+ * Return: 0 upon success or a negative error upon failure.
-+ */
-+int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ int port = ppe_port->port_id;
-+ u32 val;
-+ int ret;
-+
-+ ret = regmap_read(ppe_dev->regmap, PPE_LPI_EN_ADDR, &val);
-+ if (ret)
-+ return ret;
-+
-+ if (eee->tx_lpi_enabled)
-+ val |= PPE_LPI_PORT_EN(port);
-+ else
-+ val &= ~PPE_LPI_PORT_EN(port);
-+
-+ ret = regmap_write(ppe_dev->regmap, PPE_LPI_EN_ADDR, val);
-+
-+ return ret;
-+}
-+
- /* PPE port and MAC reset */
- static int ppe_port_mac_reset(struct ppe_port *ppe_port)
- {
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-@@ -8,6 +8,7 @@
-
- #include <linux/phylink.h>
-
-+struct ethtool_eee;
- struct rtnl_link_stats64;
-
- /**
-@@ -86,4 +87,6 @@ void ppe_port_get_strings(struct ppe_por
- void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
- void ppe_port_get_stats64(struct ppe_port *ppe_port,
- struct rtnl_link_stats64 *s);
-+int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
-+int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee);
- #endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -18,6 +18,16 @@
- #define PPE_PORT5_SEL_PCS1 BIT(4)
- #define PPE_PORT_SEL_XGMAC(x) (BIT(8) << ((x) - 1))
-
-+/* PPE port LPI enable register */
-+#define PPE_LPI_EN_ADDR 0x400
-+#define PPE_LPI_PORT1_EN BIT(0)
-+#define PPE_LPI_PORT2_EN BIT(1)
-+#define PPE_LPI_PORT3_EN BIT(2)
-+#define PPE_LPI_PORT4_EN BIT(3)
-+#define PPE_LPI_PORT5_EN BIT(4)
-+#define PPE_LPI_PORT6_EN BIT(5)
-+#define PPE_LPI_PORT_EN(x) (BIT(0) << ((x) - 1))
-+
- /* There are 15 BM ports and 4 BM groups supported by PPE,
- * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
- * to PPE physical port 1-6, BM port 14 is matched to EIP.
-@@ -580,6 +590,17 @@
- #define GMAC_SPEED_100 1
- #define GMAC_SPEED_1000 2
-
-+/* GMAC MAC address register */
-+#define GMAC_GOL_ADDR0_ADDR 0x8
-+#define GMAC_ADDR_BYTE5 GENMASK(15, 8)
-+#define GMAC_ADDR_BYTE4 GENMASK(7, 0)
-+
-+#define GMAC_GOL_ADDR1_ADDR 0xC
-+#define GMAC_ADDR_BYTE0 GENMASK(31, 24)
-+#define GMAC_ADDR_BYTE1 GENMASK(23, 16)
-+#define GMAC_ADDR_BYTE2 GENMASK(15, 8)
-+#define GMAC_ADDR_BYTE3 GENMASK(7, 0)
-+
- /* GMAC control register */
- #define GMAC_CTRL_ADDR 0x18
- #define GMAC_TX_THD_M GENMASK(27, 24)
-@@ -705,6 +726,14 @@
- #define XGMAC_RX_FLOW_CTRL_ADDR 0x90
- #define XGMAC_RXFCEN BIT(0)
-
-+/* XGMAC MAC address register */
-+#define XGMAC_ADDR0_H_ADDR 0x300
-+#define XGMAC_ADDR_EN BIT(31)
-+#define XGMAC_ADDRH GENMASK(15, 0)
-+
-+#define XGMAC_ADDR0_L_ADDR 0x304
-+#define XGMAC_ADDRL GENMASK(31, 0)
-+
- /* XGMAC management counters control register */
- #define XGMAC_MMC_CTRL_ADDR 0x800
- #define XGMAC_MCF BIT(3)
+++ /dev/null
-From cf3e71b3c8bd63cd832c0512386700cac6a2c363 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Tue, 5 Mar 2024 16:42:56 +0800
-Subject: [PATCH 35/50] net: ethernet: qualcomm: Add API to configure PPE port
- max frame size
-
-This function is called when the MTU of an ethernet port is
-configured. It limits the size of packet passed through the
-ethernet port.
-
-Change-Id: I2a4dcd04407156d73770d2becbb7cbc0d56b3754
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 44 ++++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 1 +
- 2 files changed, 45 insertions(+)
-
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-@@ -537,6 +537,50 @@ int ppe_port_set_mac_eee(struct ppe_port
- return ret;
- }
-
-+/**
-+ * ppe_port_set_maxframe() - Set port maximum frame size
-+ * @ppe_port: PPE port structure
-+ * @maxframe_size: Maximum frame size supported by PPE port
-+ *
-+ * Description: Set MTU of network interface specified by @ppe_port.
-+ *
-+ * Return: 0 upon success or a negative error upon failure.
-+ */
-+int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size)
-+{
-+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
-+ u32 reg, val, mru_mtu_val[3];
-+ int port = ppe_port->port_id;
-+ int ret;
-+
-+ /* The max frame size should be MTU added by ETH_HLEN in PPE. */
-+ maxframe_size += ETH_HLEN;
-+
-+ /* MAC takes cover the FCS for the calculation of frame size. */
-+ if (maxframe_size > PPE_PORT_MAC_MAX_FRAME_SIZE - ETH_FCS_LEN)
-+ return -EINVAL;
-+
-+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
-+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU, maxframe_size);
-+ ret = regmap_update_bits(ppe_dev->regmap, reg,
-+ PPE_MC_MTU_CTRL_TBL_MTU,
-+ val);
-+ if (ret)
-+ return ret;
-+
-+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
-+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+ if (ret)
-+ return ret;
-+
-+ PPE_MRU_MTU_CTRL_SET_MRU(mru_mtu_val, maxframe_size);
-+ PPE_MRU_MTU_CTRL_SET_MTU(mru_mtu_val, maxframe_size);
-+
-+ return regmap_bulk_write(ppe_dev->regmap, reg,
-+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
-+}
-+
- /* PPE port and MAC reset */
- static int ppe_port_mac_reset(struct ppe_port *ppe_port)
- {
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
-@@ -89,4 +89,5 @@ void ppe_port_get_stats64(struct ppe_por
- struct rtnl_link_stats64 *s);
- int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
- int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee);
-+int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size);
- #endif
+++ /dev/null
-From f9246c9597e89510ae016c33ffa3b367ed83cf2d Mon Sep 17 00:00:00 2001
-From: Pavithra R <quic_pavir@quicinc.com>
-Date: Wed, 28 Feb 2024 11:25:15 +0530
-Subject: [PATCH 38/50] net: ethernet: qualcomm: Add EDMA support for QCOM
- IPQ9574 chipset.
-
-Add the infrastructure functions such as Makefile,
-EDMA hardware configuration, clock and IRQ initializations.
-
-Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
-Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
-Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 3 +
- drivers/net/ethernet/qualcomm/ppe/edma.c | 456 +++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma.h | 99 ++++
- drivers/net/ethernet/qualcomm/ppe/ppe.c | 10 +-
- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
- 5 files changed, 820 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -5,3 +5,6 @@
-
- obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-+
-+#EDMA
-+qcom-ppe-objs += edma.o
-\ No newline at end of file
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -0,0 +1,456 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+ /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+ /* Qualcomm Ethernet DMA driver setup, HW configuration, clocks and
-+ * interrupt initializations.
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/netdevice.h>
-+#include <linux/of_irq.h>
-+#include <linux/platform_device.h>
-+#include <linux/printk.h>
-+#include <linux/regmap.h>
-+#include <linux/reset.h>
-+
-+#include "edma.h"
-+#include "ppe_regs.h"
-+
-+#define EDMA_IRQ_NAME_SIZE 32
-+
-+/* Global EDMA context. */
-+struct edma_context *edma_ctx;
-+
-+/* Priority to multi-queue mapping. */
-+static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
-+ 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
-+
-+enum edma_clk_id {
-+ EDMA_CLK,
-+ EDMA_CFG_CLK,
-+ EDMA_CLK_MAX
-+};
-+
-+static const char * const clock_name[EDMA_CLK_MAX] = {
-+ [EDMA_CLK] = "edma",
-+ [EDMA_CFG_CLK] = "edma-cfg",
-+};
-+
-+/* Rx Fill ring info for IPQ9574. */
-+static struct edma_ring_info ipq9574_rxfill_ring_info = {
-+ .max_rings = 8,
-+ .ring_start = 4,
-+ .num_rings = 4,
-+};
-+
-+/* Rx ring info for IPQ9574. */
-+static struct edma_ring_info ipq9574_rx_ring_info = {
-+ .max_rings = 24,
-+ .ring_start = 20,
-+ .num_rings = 4,
-+};
-+
-+/* Tx ring info for IPQ9574. */
-+static struct edma_ring_info ipq9574_tx_ring_info = {
-+ .max_rings = 32,
-+ .ring_start = 8,
-+ .num_rings = 24,
-+};
-+
-+/* Tx complete ring info for IPQ9574. */
-+static struct edma_ring_info ipq9574_txcmpl_ring_info = {
-+ .max_rings = 32,
-+ .ring_start = 8,
-+ .num_rings = 24,
-+};
-+
-+/* HW info for IPQ9574. */
-+static struct edma_hw_info ipq9574_hw_info = {
-+ .rxfill = &ipq9574_rxfill_ring_info,
-+ .rx = &ipq9574_rx_ring_info,
-+ .tx = &ipq9574_tx_ring_info,
-+ .txcmpl = &ipq9574_txcmpl_ring_info,
-+ .max_ports = 6,
-+ .napi_budget_rx = 128,
-+ .napi_budget_tx = 512,
-+};
-+
-+static int edma_clock_set_and_enable(struct device *dev,
-+ const char *id, unsigned long rate)
-+{
-+ struct device_node *edma_np;
-+ struct clk *clk = NULL;
-+ int ret;
-+
-+ edma_np = of_get_child_by_name(dev->of_node, "edma");
-+
-+ clk = devm_get_clk_from_child(dev, edma_np, id);
-+ if (IS_ERR(clk)) {
-+ dev_err(dev, "clk %s get failed\n", id);
-+ of_node_put(edma_np);
-+ return PTR_ERR(clk);
-+ }
-+
-+ ret = clk_set_rate(clk, rate);
-+ if (ret) {
-+ dev_err(dev, "set %lu rate for %s failed\n", rate, id);
-+ of_node_put(edma_np);
-+ return ret;
-+ }
-+
-+ ret = clk_prepare_enable(clk);
-+ if (ret) {
-+ dev_err(dev, "clk %s enable failed\n", id);
-+ of_node_put(edma_np);
-+ return ret;
-+ }
-+
-+ of_node_put(edma_np);
-+
-+ dev_dbg(dev, "set %lu rate for %s\n", rate, id);
-+
-+ return 0;
-+}
-+
-+static int edma_clock_init(void)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+ unsigned long ppe_rate;
-+ int ret;
-+
-+ ppe_rate = ppe_dev->clk_rate;
-+
-+ ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
-+ ppe_rate);
-+ if (ret)
-+ return ret;
-+
-+ ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
-+ ppe_rate);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+/**
-+ * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
-+ *
-+ * Map int_priority values to priority class and initialize
-+ * unicast priority map table for default profile_id.
-+ */
-+static int edma_configure_ucast_prio_map_tbl(void)
-+{
-+ u8 pri_class, int_pri;
-+ int ret = 0;
-+
-+ /* Set the priority class value for every possible priority. */
-+ for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
-+ pri_class = edma_pri_map[int_pri];
-+
-+ /* Priority offset should be less than maximum supported
-+ * queue priority.
-+ */
-+ if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
-+ pr_err("Configured incorrect priority offset: %d\n",
-+ pri_class);
-+ return -EINVAL;
-+ }
-+
-+ ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
-+ PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
-+
-+ if (ret) {
-+ pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
-+ ret, int_pri, 0);
-+ return ret;
-+ }
-+
-+ pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
-+ 0, int_pri, pri_class);
-+ }
-+
-+ return ret;
-+}
-+
-+static int edma_irq_init(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ char edma_irq_name[EDMA_IRQ_NAME_SIZE];
-+ struct device *dev = ppe_dev->dev;
-+ struct platform_device *pdev;
-+ struct device_node *edma_np;
-+ u32 i;
-+
-+ pdev = to_platform_device(dev);
-+ edma_np = of_get_child_by_name(dev->of_node, "edma");
-+ edma_ctx->intr_info.intr_txcmpl = kzalloc((sizeof(*edma_ctx->intr_info.intr_txcmpl) *
-+ txcmpl->num_rings), GFP_KERNEL);
-+ if (!edma_ctx->intr_info.intr_txcmpl) {
-+ of_node_put(edma_np);
-+ return -ENOMEM;
-+ }
-+
-+ /* Get TXCMPL rings IRQ numbers. */
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
-+ txcmpl->ring_start + i);
-+ edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
-+ if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
-+ dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
-+ edma_np->name, i);
-+ of_node_put(edma_np);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ return edma_ctx->intr_info.intr_txcmpl[i];
-+ }
-+
-+ dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %u\n",
-+ edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
-+ }
-+
-+ edma_ctx->intr_info.intr_rx = kzalloc((sizeof(*edma_ctx->intr_info.intr_rx) *
-+ rx->num_rings), GFP_KERNEL);
-+ if (!edma_ctx->intr_info.intr_rx) {
-+ of_node_put(edma_np);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ return -ENOMEM;
-+ }
-+
-+ /* Get RXDESC rings IRQ numbers. */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
-+ rx->ring_start + i);
-+ edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
-+ if (edma_ctx->intr_info.intr_rx[i] < 0) {
-+ dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
-+ edma_np->name, i);
-+ of_node_put(edma_np);
-+ kfree(edma_ctx->intr_info.intr_rx);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ return edma_ctx->intr_info.intr_rx[i];
-+ }
-+
-+ dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %u\n",
-+ edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
-+ }
-+
-+ /* Get misc IRQ number. */
-+ edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
-+ if (edma_ctx->intr_info.intr_misc < 0) {
-+ dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
-+ of_node_put(edma_np);
-+ kfree(edma_ctx->intr_info.intr_rx);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ return edma_ctx->intr_info.intr_misc;
-+ }
-+
-+ of_node_put(edma_np);
-+
-+ dev_dbg(dev, "%s: misc IRQ:%u\n", edma_np->name,
-+ edma_ctx->intr_info.intr_misc);
-+
-+ return 0;
-+}
-+
-+static int edma_hw_reset(void)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+ struct reset_control *edma_hw_rst;
-+ struct device_node *edma_np;
-+ const char *reset_string;
-+ u32 count, i;
-+ int ret;
-+
-+ /* Count and parse reset names from DTSI. */
-+ edma_np = of_get_child_by_name(dev->of_node, "edma");
-+ count = of_property_count_strings(edma_np, "reset-names");
-+ if (count < 0) {
-+ dev_err(dev, "EDMA reset entry not found\n");
-+ of_node_put(edma_np);
-+ return -EINVAL;
-+ }
-+
-+ for (i = 0; i < count; i++) {
-+ ret = of_property_read_string_index(edma_np, "reset-names",
-+ i, &reset_string);
-+ if (ret) {
-+ dev_err(dev, "Error reading reset-names");
-+ of_node_put(edma_np);
-+ return -EINVAL;
-+ }
-+
-+ edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
-+ if (IS_ERR(edma_hw_rst)) {
-+ of_node_put(edma_np);
-+ return PTR_ERR(edma_hw_rst);
-+ }
-+
-+ /* 100ms delay is required by hardware to reset EDMA. */
-+ reset_control_assert(edma_hw_rst);
-+ fsleep(100);
-+
-+ reset_control_deassert(edma_hw_rst);
-+ fsleep(100);
-+
-+ reset_control_put(edma_hw_rst);
-+ dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
-+ }
-+
-+ of_node_put(edma_np);
-+
-+ return 0;
-+}
-+
-+static int edma_hw_configure(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 data, reg;
-+ int ret;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
-+ ret = regmap_read(regmap, reg, &data);
-+ if (ret)
-+ return ret;
-+
-+ pr_debug("EDMA ver %d hw init\n", data);
-+
-+ /* Setup private data structure. */
-+ edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
-+ edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
-+
-+ /* Reset EDMA. */
-+ ret = edma_hw_reset();
-+ if (ret) {
-+ pr_err("Error in resetting the hardware. ret: %d\n", ret);
-+ return ret;
-+ }
-+
-+ /* Allocate memory for netdevices. */
-+ edma_ctx->netdev_arr = kzalloc((sizeof(**edma_ctx->netdev_arr) *
-+ hw_info->max_ports),
-+ GFP_KERNEL);
-+ if (!edma_ctx->netdev_arr)
-+ return -ENOMEM;
-+
-+ /* Configure DMA request priority, DMA read burst length,
-+ * and AXI write size.
-+ */
-+ data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
-+ data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
-+ data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
-+ data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
-+ data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
-+ ret = regmap_write(regmap, reg, data);
-+ if (ret)
-+ return ret;
-+
-+ /* Configure Tx Timeout Threshold. */
-+ data = EDMA_TX_TIMEOUT_THRESH_VAL;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
-+ ret = regmap_write(regmap, reg, data);
-+ if (ret)
-+ return ret;
-+
-+ /* Set Miscellaneous error mask. */
-+ data = EDMA_MISC_AXI_RD_ERR_MASK |
-+ EDMA_MISC_AXI_WR_ERR_MASK |
-+ EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
-+ EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
-+ EDMA_MISC_TX_SRAM_FULL_MASK |
-+ EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
-+ EDMA_MISC_DATA_LEN_ERR_MASK;
-+ data |= EDMA_MISC_TX_TIMEOUT_MASK;
-+ edma_ctx->intr_info.intr_mask_misc = data;
-+
-+ /* Global EDMA enable and padding enable. */
-+ data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
-+ ret = regmap_write(regmap, reg, data);
-+ if (ret)
-+ return ret;
-+
-+ /* Initialize unicast priority map table. */
-+ ret = (int)edma_configure_ucast_prio_map_tbl();
-+ if (ret) {
-+ pr_err("Failed to initialize unicast priority map table: %d\n",
-+ ret);
-+ kfree(edma_ctx->netdev_arr);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * edma_destroy - EDMA Destroy.
-+ * @ppe_dev: PPE device
-+ *
-+ * Free the memory allocated during setup.
-+ */
-+void edma_destroy(struct ppe_device *ppe_dev)
-+{
-+ kfree(edma_ctx->intr_info.intr_rx);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ kfree(edma_ctx->netdev_arr);
-+}
-+
-+/**
-+ * edma_setup - EDMA Setup.
-+ * @ppe_dev: PPE device
-+ *
-+ * Configure Ethernet global ctx, clocks, hardware and interrupts.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int edma_setup(struct ppe_device *ppe_dev)
-+{
-+ struct device *dev = ppe_dev->dev;
-+ int ret;
-+
-+ edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
-+ if (!edma_ctx)
-+ return -ENOMEM;
-+
-+ edma_ctx->hw_info = &ipq9574_hw_info;
-+ edma_ctx->ppe_dev = ppe_dev;
-+
-+ /* Configure the EDMA common clocks. */
-+ ret = edma_clock_init();
-+ if (ret) {
-+ dev_err(dev, "Error in configuring the EDMA clocks\n");
-+ return ret;
-+ }
-+
-+ dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
-+
-+ ret = edma_hw_configure();
-+ if (ret) {
-+ dev_err(dev, "Error in edma configuration\n");
-+ return ret;
-+ }
-+
-+ ret = edma_irq_init();
-+ if (ret) {
-+ dev_err(dev, "Error in irq initialization\n");
-+ return ret;
-+ }
-+
-+ dev_info(dev, "EDMA configuration successful\n");
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -0,0 +1,99 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_MAIN__
-+#define __EDMA_MAIN__
-+
-+#include "ppe_api.h"
-+
-+/* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
-+ *
-+ * One timer unit is 128 clock cycles.
-+ *
-+ * So, therefore the microsecond to timer unit calculation is:
-+ * Timer unit = time in microseconds / (one clock cycle in microsecond * cycles in 1 timer unit)
-+ * = ('x' microsecond * EDMA clock frequency in MHz ('y') / 128).
-+ *
-+ */
-+#define EDMA_CYCLE_PER_TIMER_UNIT 128
-+#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y) ((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
-+#define MHZ 1000000UL
-+
-+/* EDMA profile ID. */
-+#define EDMA_CPU_PORT_PROFILE_ID 0
-+
-+/* Number of PPE queue priorities supported per ARM core. */
-+#define EDMA_PRI_MAX_PER_CORE 8
-+
-+/**
-+ * struct edma_ring_info - EDMA ring data structure.
-+ * @max_rings: Maximum number of rings
-+ * @ring_start: Ring start ID
-+ * @num_rings: Number of rings
-+ */
-+struct edma_ring_info {
-+ u32 max_rings;
-+ u32 ring_start;
-+ u32 num_rings;
-+};
-+
-+/**
-+ * struct edma_hw_info - EDMA hardware data structure.
-+ * @rxfill: Rx Fill ring information
-+ * @rx: Rx Desc ring information
-+ * @tx: Tx Desc ring information
-+ * @txcmpl: Tx complete ring information
-+ * @max_ports: Maximum number of ports
-+ * @napi_budget_rx: Rx NAPI budget
-+ * @napi_budget_tx: Tx NAPI budget
-+ */
-+struct edma_hw_info {
-+ struct edma_ring_info *rxfill;
-+ struct edma_ring_info *rx;
-+ struct edma_ring_info *tx;
-+ struct edma_ring_info *txcmpl;
-+ u32 max_ports;
-+ u32 napi_budget_rx;
-+ u32 napi_budget_tx;
-+};
-+
-+/**
-+ * struct edma_intr_info - EDMA interrupt data structure.
-+ * @intr_mask_rx: RX interrupt mask
-+ * @intr_rx: Rx interrupts
-+ * @intr_mask_txcmpl: Tx completion interrupt mask
-+ * @intr_txcmpl: Tx completion interrupts
-+ * @intr_mask_misc: Miscellaneous interrupt mask
-+ * @intr_misc: Miscellaneous interrupts
-+ */
-+struct edma_intr_info {
-+ u32 intr_mask_rx;
-+ u32 *intr_rx;
-+ u32 intr_mask_txcmpl;
-+ u32 *intr_txcmpl;
-+ u32 intr_mask_misc;
-+ u32 intr_misc;
-+};
-+
-+/**
-+ * struct edma_context - EDMA context.
-+ * @netdev_arr: Net device for each EDMA port
-+ * @ppe_dev: PPE device
-+ * @hw_info: EDMA Hardware info
-+ * @intr_info: EDMA Interrupt info
-+ */
-+struct edma_context {
-+ struct net_device **netdev_arr;
-+ struct ppe_device *ppe_dev;
-+ struct edma_hw_info *hw_info;
-+ struct edma_intr_info intr_info;
-+};
-+
-+/* Global EDMA context. */
-+extern struct edma_context *edma_ctx;
-+
-+void edma_destroy(struct ppe_device *ppe_dev);
-+int edma_setup(struct ppe_device *ppe_dev);
-+
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
-@@ -14,6 +14,7 @@
- #include <linux/regmap.h>
- #include <linux/reset.h>
-
-+#include "edma.h"
- #include "ppe.h"
- #include "ppe_config.h"
- #include "ppe_debugfs.h"
-@@ -208,10 +209,16 @@ static int qcom_ppe_probe(struct platfor
- if (ret)
- return dev_err_probe(dev, ret, "PPE HW config failed\n");
-
-- ret = ppe_port_mac_init(ppe_dev);
-+ ret = edma_setup(ppe_dev);
- if (ret)
-+ return dev_err_probe(dev, ret, "EDMA setup failed\n");
-+
-+ ret = ppe_port_mac_init(ppe_dev);
-+ if (ret) {
-+ edma_destroy(ppe_dev);
- return dev_err_probe(dev, ret,
- "PPE Port MAC initialization failed\n");
-+ }
-
- ppe_debugfs_setup(ppe_dev);
- platform_set_drvdata(pdev, ppe_dev);
-@@ -226,6 +233,7 @@ static void qcom_ppe_remove(struct platf
- ppe_dev = platform_get_drvdata(pdev);
- ppe_debugfs_teardown(ppe_dev);
- ppe_port_mac_deinit(ppe_dev);
-+ edma_destroy(ppe_dev);
-
- platform_set_drvdata(pdev, NULL);
- }
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
-@@ -788,4 +788,257 @@
- #define XGMAC_RXDISCARD_GB_ADDR 0x9AC
- #define XGMAC_RXDISCARDBYTE_GB_ADDR 0x9B4
-
-+#define EDMA_BASE_OFFSET 0xb00000
-+
-+/* EDMA register offsets */
-+#define EDMA_REG_MAS_CTRL_ADDR 0x0
-+#define EDMA_REG_PORT_CTRL_ADDR 0x4
-+#define EDMA_REG_VLAN_CTRL_ADDR 0x8
-+#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR 0x14
-+#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR 0x18
-+#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR 0x1c
-+#define EDMA_REG_TXQ_CTRL_ADDR 0x20
-+#define EDMA_REG_TXQ_CTRL_2_ADDR 0x24
-+#define EDMA_REG_TXQ_FC_0_ADDR 0x28
-+#define EDMA_REG_TXQ_FC_1_ADDR 0x30
-+#define EDMA_REG_TXQ_FC_2_ADDR 0x34
-+#define EDMA_REG_TXQ_FC_3_ADDR 0x38
-+#define EDMA_REG_RXQ_CTRL_ADDR 0x3c
-+#define EDMA_REG_MISC_ERR_QID_ADDR 0x40
-+#define EDMA_REG_RXQ_FC_THRE_ADDR 0x44
-+#define EDMA_REG_DMAR_CTRL_ADDR 0x48
-+#define EDMA_REG_AXIR_CTRL_ADDR 0x4c
-+#define EDMA_REG_AXIW_CTRL_ADDR 0x50
-+#define EDMA_REG_MIN_MSS_ADDR 0x54
-+#define EDMA_REG_LOOPBACK_CTRL_ADDR 0x58
-+#define EDMA_REG_MISC_INT_STAT_ADDR 0x5c
-+#define EDMA_REG_MISC_INT_MASK_ADDR 0x60
-+#define EDMA_REG_DBG_CTRL_ADDR 0x64
-+#define EDMA_REG_DBG_DATA_ADDR 0x68
-+#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR 0x6c
-+#define EDMA_REG_REQ0_FIFO_THRESH_ADDR 0x80
-+#define EDMA_REG_WB_OS_THRESH_ADDR 0x84
-+#define EDMA_REG_MISC_ERR_QID_REG2_ADDR 0x88
-+#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR 0x8c
-+#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR 0x90
-+#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR 0x94
-+#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR 0x98
-+#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR 0x9c
-+#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR 0xa0
-+
-+/* Tx descriptor ring configuration register addresses */
-+#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * (n)))
-+#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * (n)))
-+#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * (n)))
-+#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * (n)))
-+#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * (n)))
-+#define EDMA_REG_TXDESC_BA2(n) (0x1014 + (0x1000 * (n)))
-+
-+/* RxFill ring configuration register addresses */
-+#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * (n)))
-+#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * (n)))
-+
-+/* Rx descriptor ring configuration register addresses */
-+#define EDMA_REG_RXDESC_BA(n) (0x39000 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_DISABLE(n) (0x39020 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_DISABLE_DONE(n) (0x39024 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_PREHEADER_BA(n) (0x39028 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_INT_STAT(n) (0x59000 + (0x1000 * (n)))
-+#define EDMA_REG_RXDESC_INT_MASK(n) (0x59004 + (0x1000 * (n)))
-+
-+#define EDMA_REG_RX_MOD_TIMER(n) (0x59008 + (0x1000 * (n)))
-+#define EDMA_REG_RX_INT_CTRL(n) (0x5900c + (0x1000 * (n)))
-+
-+/* Tx completion ring configuration register addresses */
-+#define EDMA_REG_TXCMPL_BA(n) (0x79000 + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_PROD_IDX(n) (0x79004 + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_CONS_IDX(n) (0x79008 + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_RING_SIZE(n) (0x7900c + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_UGT_THRE(n) (0x79010 + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_CTRL(n) (0x79014 + (0x1000 * (n)))
-+#define EDMA_REG_TXCMPL_BPC(n) (0x79018 + (0x1000 * (n)))
-+
-+#define EDMA_REG_TX_INT_STAT(n) (0x99000 + (0x1000 * (n)))
-+#define EDMA_REG_TX_INT_MASK(n) (0x99004 + (0x1000 * (n)))
-+#define EDMA_REG_TX_MOD_TIMER(n) (0x99008 + (0x1000 * (n)))
-+#define EDMA_REG_TX_INT_CTRL(n) (0x9900c + (0x1000 * (n)))
-+
-+/* EDMA_QID2RID_TABLE_MEM register field masks */
-+#define EDMA_RX_RING_ID_QUEUE0_MASK GENMASK(7, 0)
-+#define EDMA_RX_RING_ID_QUEUE1_MASK GENMASK(15, 8)
-+#define EDMA_RX_RING_ID_QUEUE2_MASK GENMASK(23, 16)
-+#define EDMA_RX_RING_ID_QUEUE3_MASK GENMASK(31, 24)
-+
-+/* EDMA_REG_PORT_CTRL register bit definitions */
-+#define EDMA_PORT_PAD_EN 0x1
-+#define EDMA_PORT_EDMA_EN 0x2
-+
-+/* EDMA_REG_DMAR_CTRL register field masks */
-+#define EDMA_DMAR_REQ_PRI_MASK GENMASK(2, 0)
-+#define EDMA_DMAR_BURST_LEN_MASK BIT(3)
-+#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK GENMASK(8, 4)
-+#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK GENMASK(11, 9)
-+#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK GENMASK(14, 12)
-+
-+#define EDMA_BURST_LEN_ENABLE 0
-+
-+/* Tx timeout threshold */
-+#define EDMA_TX_TIMEOUT_THRESH_VAL 0xFFFF
-+
-+/* Rx descriptor ring base address mask */
-+#define EDMA_RXDESC_BA_MASK 0xffffffff
-+
-+/* Rx Descriptor ring pre-header base address mask */
-+#define EDMA_RXDESC_PREHEADER_BA_MASK 0xffffffff
-+
-+/* Tx descriptor prod ring index mask */
-+#define EDMA_TXDESC_PROD_IDX_MASK 0xffff
-+
-+/* Tx descriptor consumer ring index mask */
-+#define EDMA_TXDESC_CONS_IDX_MASK 0xffff
-+
-+/* Tx descriptor ring size mask */
-+#define EDMA_TXDESC_RING_SIZE_MASK 0xffff
-+
-+/* Tx descriptor ring enable */
-+#define EDMA_TXDESC_TX_ENABLE 0x1
-+
-+#define EDMA_TXDESC_CTRL_TXEN_MASK BIT(0)
-+#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK GENMASK(3, 1)
-+
-+/* Tx completion ring prod index mask */
-+#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff
-+
-+/* Tx completion ring urgent threshold mask */
-+#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff
-+#define EDMA_TXCMPL_LOW_THRE_SHIFT 0
-+
-+/* EDMA_REG_TX_MOD_TIMER mask */
-+#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
-+#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0
-+
-+/* Rx fill ring prod index mask */
-+#define EDMA_RXFILL_PROD_IDX_MASK 0xffff
-+
-+/* Rx fill ring consumer index mask */
-+#define EDMA_RXFILL_CONS_IDX_MASK 0xffff
-+
-+/* Rx fill ring size mask */
-+#define EDMA_RXFILL_RING_SIZE_MASK 0xffff
-+
-+/* Rx fill ring flow control threshold masks */
-+#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff
-+#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12
-+#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff
-+#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0
-+
-+/* Rx fill ring enable bit */
-+#define EDMA_RXFILL_RING_EN 0x1
-+
-+/* Rx desc ring prod index mask */
-+#define EDMA_RXDESC_PROD_IDX_MASK 0xffff
-+
-+/* Rx descriptor ring cons index mask */
-+#define EDMA_RXDESC_CONS_IDX_MASK 0xffff
-+
-+/* Rx descriptor ring size masks */
-+#define EDMA_RXDESC_RING_SIZE_MASK 0xffff
-+#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff
-+#define EDMA_RXDESC_PL_OFFSET_SHIFT 16
-+#define EDMA_RXDESC_PL_DEFAULT_VALUE 0
-+
-+/* Rx descriptor ring flow control threshold masks */
-+#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff
-+#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12
-+#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff
-+#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0
-+
-+/* Rx descriptor ring urgent threshold mask */
-+#define EDMA_RXDESC_LOW_THRE_MASK 0xffff
-+#define EDMA_RXDESC_LOW_THRE_SHIFT 0
-+
-+/* Rx descriptor ring enable bit */
-+#define EDMA_RXDESC_RX_EN 0x1
-+
-+/* Tx interrupt status bit */
-+#define EDMA_TX_INT_MASK_PKT_INT 0x1
-+
-+/* Rx interrupt mask */
-+#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1
-+
-+#define EDMA_MASK_INT_DISABLE 0x0
-+#define EDMA_MASK_INT_CLEAR 0x0
-+
-+/* EDMA_REG_RX_MOD_TIMER register field masks */
-+#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
-+#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0
-+
-+/* EDMA Ring mask */
-+#define EDMA_RING_DMA_MASK 0xffffffff
-+
-+/* RXDESC threshold interrupt. */
-+#define EDMA_RXDESC_UGT_INT_STAT 0x2
-+
-+/* RXDESC timer interrupt */
-+#define EDMA_RXDESC_PKT_INT_STAT 0x1
-+
-+/* RXDESC Interrupt status mask */
-+#define EDMA_RXDESC_RING_INT_STATUS_MASK \
-+ (EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
-+
-+/* TXCMPL threshold interrupt. */
-+#define EDMA_TXCMPL_UGT_INT_STAT 0x2
-+
-+/* TXCMPL timer interrupt */
-+#define EDMA_TXCMPL_PKT_INT_STAT 0x1
-+
-+/* TXCMPL Interrupt status mask */
-+#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
-+ (EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
-+
-+#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0
-+
-+#define EDMA_RXDESC_LOW_THRE 0
-+#define EDMA_RX_MOD_TIMER_INIT 1000
-+#define EDMA_RX_NE_INT_EN 0x2
-+
-+#define EDMA_TX_MOD_TIMER 150
-+
-+#define EDMA_TX_INITIAL_PROD_IDX 0x0
-+#define EDMA_TX_NE_INT_EN 0x2
-+
-+/* EDMA misc error mask */
-+#define EDMA_MISC_AXI_RD_ERR_MASK BIT(0)
-+#define EDMA_MISC_AXI_WR_ERR_MASK BIT(1)
-+#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK BIT(2)
-+#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK BIT(3)
-+#define EDMA_MISC_TX_SRAM_FULL_MASK BIT(4)
-+#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK BIT(5)
-+
-+#define EDMA_MISC_DATA_LEN_ERR_MASK BIT(6)
-+#define EDMA_MISC_TX_TIMEOUT_MASK BIT(7)
-+
-+/* EDMA txdesc2cmpl map */
-+#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK 0x1F
-+
-+/* EDMA rxdesc2fill map */
-+#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK 0x7
-+
- #endif
+++ /dev/null
-From cbcaf81cd148b77ee0570a482b536f269a9f6657 Mon Sep 17 00:00:00 2001
-From: Suruchi Agarwal <quic_suruchia@quicinc.com>
-Date: Thu, 21 Mar 2024 16:14:46 -0700
-Subject: [PATCH 39/50] net: ethernet: qualcomm: Add netdevice support for QCOM
- IPQ9574 chipset.
-
-Add EDMA ports and netdevice operations for QCOM IPQ9574 chipset.
-
-Change-Id: I08b2eff52b4ef0d6d428c1c416f5580ef010973f
-Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.h | 3 +
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 270 ++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_port.h | 31 ++
- drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 19 ++
- 5 files changed, 324 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-
- #EDMA
--qcom-ppe-objs += edma.o
-\ No newline at end of file
-+qcom-ppe-objs += edma.o edma_port.o
-\ No newline at end of file
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -26,6 +26,9 @@
- /* Number of PPE queue priorities supported per ARM core. */
- #define EDMA_PRI_MAX_PER_CORE 8
-
-+/* Interface ID start. */
-+#define EDMA_START_IFNUM 1
-+
- /**
- * struct edma_ring_info - EDMA ring data structure.
- * @max_rings: Maximum number of rings
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-@@ -0,0 +1,270 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+ /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* EDMA port initialization, configuration and netdevice ops handling */
-+
-+#include <linux/etherdevice.h>
-+#include <linux/net.h>
-+#include <linux/netdevice.h>
-+#include <linux/of_net.h>
-+#include <linux/phylink.h>
-+#include <linux/printk.h>
-+
-+#include "edma.h"
-+#include "edma_port.h"
-+#include "ppe_regs.h"
-+
-+/* Number of netdev queues. */
-+#define EDMA_NETDEV_QUEUE_NUM 4
-+
-+static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
-+ __maybe_unused struct sk_buff *skb,
-+ __maybe_unused struct net_device *sb_dev)
-+{
-+ int cpu = get_cpu();
-+
-+ put_cpu();
-+
-+ return cpu;
-+}
-+
-+static int edma_port_open(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ /* Inform the Linux Networking stack about the hardware capability of
-+ * checksum offloading and other features. Each port is
-+ * responsible to maintain the feature set it supports.
-+ */
-+ netdev->features |= EDMA_NETDEV_FEATURES;
-+ netdev->hw_features |= EDMA_NETDEV_FEATURES;
-+ netdev->vlan_features |= EDMA_NETDEV_FEATURES;
-+ netdev->wanted_features |= EDMA_NETDEV_FEATURES;
-+
-+ ppe_port = port_priv->ppe_port;
-+
-+ if (ppe_port->phylink)
-+ phylink_start(ppe_port->phylink);
-+
-+ netif_start_queue(netdev);
-+
-+ return 0;
-+}
-+
-+static int edma_port_close(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ netif_stop_queue(netdev);
-+
-+ ppe_port = port_priv->ppe_port;
-+
-+ /* Phylink close. */
-+ if (ppe_port->phylink)
-+ phylink_stop(ppe_port->phylink);
-+
-+ return 0;
-+}
-+
-+static int edma_port_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *ppe_port;
-+ int ret = -EINVAL;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ ppe_port = port_priv->ppe_port;
-+ if (ppe_port->phylink)
-+ return phylink_mii_ioctl(ppe_port->phylink, ifr, cmd);
-+
-+ return ret;
-+}
-+
-+static int edma_port_change_mtu(struct net_device *netdev, int mtu)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ netdev->mtu = mtu;
-+
-+ return ppe_port_set_maxframe(port_priv->ppe_port, mtu);
-+}
-+
-+static netdev_features_t edma_port_feature_check(__maybe_unused struct sk_buff *skb,
-+ __maybe_unused struct net_device *netdev,
-+ netdev_features_t features)
-+{
-+ return features;
-+}
-+
-+static void edma_port_get_stats64(struct net_device *netdev,
-+ struct rtnl_link_stats64 *stats)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+
-+ if (!port_priv)
-+ return;
-+
-+ ppe_port_get_stats64(port_priv->ppe_port, stats);
-+}
-+
-+static int edma_port_set_mac_address(struct net_device *netdev, void *macaddr)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct sockaddr *addr = (struct sockaddr *)macaddr;
-+ int ret;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ netdev_dbg(netdev, "AddrFamily: %d, %0x:%0x:%0x:%0x:%0x:%0x\n",
-+ addr->sa_family, addr->sa_data[0], addr->sa_data[1],
-+ addr->sa_data[2], addr->sa_data[3], addr->sa_data[4],
-+ addr->sa_data[5]);
-+
-+ ret = eth_prepare_mac_addr_change(netdev, addr);
-+ if (ret)
-+ return ret;
-+
-+ if (ppe_port_set_mac_address(port_priv->ppe_port, (u8 *)addr)) {
-+ netdev_err(netdev, "set mac address failed for dev: %s\n", netdev->name);
-+ return -EINVAL;
-+ }
-+
-+ eth_commit_mac_addr_change(netdev, addr);
-+
-+ return 0;
-+}
-+
-+static const struct net_device_ops edma_port_netdev_ops = {
-+ .ndo_open = edma_port_open,
-+ .ndo_stop = edma_port_close,
-+ .ndo_get_stats64 = edma_port_get_stats64,
-+ .ndo_set_mac_address = edma_port_set_mac_address,
-+ .ndo_validate_addr = eth_validate_addr,
-+ .ndo_change_mtu = edma_port_change_mtu,
-+ .ndo_eth_ioctl = edma_port_ioctl,
-+ .ndo_features_check = edma_port_feature_check,
-+ .ndo_select_queue = edma_port_select_queue,
-+};
-+
-+/**
-+ * edma_port_destroy - EDMA port destroy.
-+ * @port: PPE port
-+ *
-+ * Unregister and free the netdevice.
-+ */
-+void edma_port_destroy(struct ppe_port *port)
-+{
-+ int port_id = port->port_id;
-+ struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
-+
-+ unregister_netdev(netdev);
-+ free_netdev(netdev);
-+ ppe_port_phylink_destroy(port);
-+ edma_ctx->netdev_arr[port_id - 1] = NULL;
-+}
-+
-+/**
-+ * edma_port_setup - EDMA port Setup.
-+ * @port: PPE port
-+ *
-+ * Initialize and register the netdevice.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int edma_port_setup(struct ppe_port *port)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device_node *np = port->np;
-+ struct edma_port_priv *port_priv;
-+ int port_id = port->port_id;
-+ struct net_device *netdev;
-+ u8 mac_addr[ETH_ALEN];
-+ int ret = 0;
-+ u8 *maddr;
-+
-+ netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
-+ EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
-+ if (!netdev) {
-+ pr_err("alloc_etherdev() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ SET_NETDEV_DEV(netdev, ppe_dev->dev);
-+ netdev->dev.of_node = np;
-+
-+ /* max_mtu is set to 1500 in ether_setup(). */
-+ netdev->max_mtu = ETH_MAX_MTU;
-+
-+ port_priv = netdev_priv(netdev);
-+ memset((void *)port_priv, 0, sizeof(struct edma_port_priv));
-+
-+ port_priv->ppe_port = port;
-+ port_priv->netdev = netdev;
-+ netdev->watchdog_timeo = 5 * HZ;
-+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-+ netdev->netdev_ops = &edma_port_netdev_ops;
-+ netdev->gso_max_segs = GSO_MAX_SEGS;
-+
-+ maddr = mac_addr;
-+ if (of_get_mac_address(np, maddr))
-+ maddr = NULL;
-+
-+ if (maddr && is_valid_ether_addr(maddr)) {
-+ eth_hw_addr_set(netdev, maddr);
-+ } else {
-+ eth_hw_addr_random(netdev);
-+ netdev_info(netdev, "GMAC%d Using random MAC address - %pM\n",
-+ port_id, netdev->dev_addr);
-+ }
-+
-+ netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
-+ netdev->name, port_id);
-+
-+ /* We expect 'port_id' to correspond to ports numbers on SoC.
-+ * These begin from '1' and hence we subtract
-+ * one when using it as an array index.
-+ */
-+ edma_ctx->netdev_arr[port_id - 1] = netdev;
-+
-+ /* Setup phylink. */
-+ ret = ppe_port_phylink_setup(port, netdev);
-+ if (ret) {
-+ netdev_dbg(netdev, "EDMA port phylink setup for netdevice %s\n",
-+ netdev->name);
-+ goto port_phylink_setup_fail;
-+ }
-+
-+ /* Register the network interface. */
-+ ret = register_netdev(netdev);
-+ if (ret) {
-+ netdev_dbg(netdev, "Error registering netdevice %s\n",
-+ netdev->name);
-+ goto register_netdev_fail;
-+ }
-+
-+ netdev_dbg(netdev, "Setup EDMA port GMAC%d done\n", port_id);
-+ return ret;
-+
-+register_netdev_fail:
-+ ppe_port_phylink_destroy(port);
-+port_phylink_setup_fail:
-+ free_netdev(netdev);
-+ edma_ctx->netdev_arr[port_id - 1] = NULL;
-+
-+ return ret;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
-@@ -0,0 +1,31 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_PORTS__
-+#define __EDMA_PORTS__
-+
-+#include "ppe_port.h"
-+
-+#define EDMA_NETDEV_FEATURES (NETIF_F_FRAGLIST \
-+ | NETIF_F_SG \
-+ | NETIF_F_RXCSUM \
-+ | NETIF_F_HW_CSUM \
-+ | NETIF_F_TSO \
-+ | NETIF_F_TSO6)
-+
-+/**
-+ * struct edma_port_priv - EDMA port priv structure.
-+ * @ppe_port: Pointer to PPE port
-+ * @netdev: Corresponding netdevice
-+ * @flags: Feature flags
-+ */
-+struct edma_port_priv {
-+ struct ppe_port *ppe_port;
-+ struct net_device *netdev;
-+ unsigned long flags;
-+};
-+
-+void edma_port_destroy(struct ppe_port *port);
-+int edma_port_setup(struct ppe_port *port);
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
-@@ -13,6 +13,7 @@
- #include <linux/regmap.h>
- #include <linux/rtnetlink.h>
-
-+#include "edma_port.h"
- #include "ppe.h"
- #include "ppe_port.h"
- #include "ppe_regs.h"
-@@ -1277,12 +1278,26 @@ int ppe_port_mac_init(struct ppe_device
- goto err_port_node;
- }
-
-+ ret = edma_port_setup(&ppe_ports->port[i]);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "QCOM EDMA port setup failed\n");
-+ i--;
-+ goto err_port_setup;
-+ }
-+
- i++;
- }
-
- of_node_put(ports_node);
- return 0;
-
-+err_port_setup:
-+ /* Destroy edma ports created till now */
-+ while (i >= 0) {
-+ edma_port_destroy(&ppe_ports->port[i]);
-+ i--;
-+ }
-+
- err_port_clk:
- for (j = 0; j < i; j++)
- ppe_port_clock_deinit(&ppe_ports->port[j]);
-@@ -1307,6 +1322,10 @@ void ppe_port_mac_deinit(struct ppe_devi
-
- for (i = 0; i < ppe_dev->ports->num; i++) {
- ppe_port = &ppe_dev->ports->port[i];
-+
-+ /* Destroy all phylinks and edma ports */
-+ edma_port_destroy(ppe_port);
-+
- ppe_port_clock_deinit(ppe_port);
- }
- }
+++ /dev/null
-From 7c7baa32e0d110d5446113f5513fca84731bddd3 Mon Sep 17 00:00:00 2001
-From: Suruchi Agarwal <quic_suruchia@quicinc.com>
-Date: Thu, 21 Mar 2024 16:21:19 -0700
-Subject: [PATCH 40/50] net: ethernet: qualcomm: Add Rx Ethernet DMA support
-
-Add Rx queues, rings, descriptors configurations and
-DMA support for the EDMA.
-
-Change-Id: I612bcd661e74d5bf3ecb33de10fd5298d18ff7e9
-Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.c | 171 +++-
- drivers/net/ethernet/qualcomm/ppe/edma.h | 18 +-
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 964 ++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 48 +
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 39 +-
- drivers/net/ethernet/qualcomm/ppe/edma_port.h | 31 +
- drivers/net/ethernet/qualcomm/ppe/edma_rx.c | 622 +++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_rx.h | 287 ++++++
- 9 files changed, 2177 insertions(+), 5 deletions(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-
- #EDMA
--qcom-ppe-objs += edma.o edma_port.o
-\ No newline at end of file
-+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
-\ No newline at end of file
---- a/drivers/net/ethernet/qualcomm/ppe/edma.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -18,12 +18,23 @@
- #include <linux/reset.h>
-
- #include "edma.h"
-+#include "edma_cfg_rx.h"
- #include "ppe_regs.h"
-
- #define EDMA_IRQ_NAME_SIZE 32
-
- /* Global EDMA context. */
- struct edma_context *edma_ctx;
-+static char **edma_rxdesc_irq_name;
-+
-+/* Module params. */
-+static int page_mode;
-+module_param(page_mode, int, 0);
-+MODULE_PARM_DESC(page_mode, "Enable page mode (default:0)");
-+
-+static int rx_buff_size;
-+module_param(rx_buff_size, int, 0640);
-+MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
-
- /* Priority to multi-queue mapping. */
- static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
-@@ -178,6 +189,59 @@ static int edma_configure_ucast_prio_map
- return ret;
- }
-
-+static int edma_irq_register(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ int ret;
-+ u32 i;
-+
-+ /* Request IRQ for RXDESC rings. */
-+ edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
-+ GFP_KERNEL);
-+ if (!edma_rxdesc_irq_name)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ edma_rxdesc_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
-+ GFP_KERNEL);
-+ if (!edma_rxdesc_irq_name[i]) {
-+ ret = -ENOMEM;
-+ goto rxdesc_irq_name_alloc_fail;
-+ }
-+
-+ snprintf(edma_rxdesc_irq_name[i], 20, "edma_rxdesc_%d",
-+ rx->ring_start + i);
-+
-+ irq_set_status_flags(edma_ctx->intr_info.intr_rx[i], IRQ_DISABLE_UNLAZY);
-+
-+ ret = request_irq(edma_ctx->intr_info.intr_rx[i],
-+ edma_rx_handle_irq, IRQF_SHARED,
-+ edma_rxdesc_irq_name[i],
-+ (void *)&edma_ctx->rx_rings[i]);
-+ if (ret) {
-+ pr_err("RXDESC ring IRQ:%d request failed\n",
-+ edma_ctx->intr_info.intr_rx[i]);
-+ goto rx_desc_ring_intr_req_fail;
-+ }
-+
-+ pr_debug("RXDESC ring: %d IRQ:%d request success: %s\n",
-+ rx->ring_start + i,
-+ edma_ctx->intr_info.intr_rx[i],
-+ edma_rxdesc_irq_name[i]);
-+ }
-+
-+ return 0;
-+
-+rx_desc_ring_intr_req_fail:
-+ for (i = 0; i < rx->num_rings; i++)
-+ kfree(edma_rxdesc_irq_name[i]);
-+rxdesc_irq_name_alloc_fail:
-+ kfree(edma_rxdesc_irq_name);
-+
-+ return ret;
-+}
-+
- static int edma_irq_init(void)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
-@@ -260,6 +324,16 @@ static int edma_irq_init(void)
- return 0;
- }
-
-+static int edma_alloc_rings(void)
-+{
-+ if (edma_cfg_rx_rings_alloc()) {
-+ pr_err("Error in allocating Rx rings\n");
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
- static int edma_hw_reset(void)
- {
- struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-@@ -343,6 +417,40 @@ static int edma_hw_configure(void)
- if (!edma_ctx->netdev_arr)
- return -ENOMEM;
-
-+ edma_ctx->dummy_dev = alloc_netdev_dummy(0);
-+ if (!edma_ctx->dummy_dev) {
-+ ret = -ENOMEM;
-+ pr_err("Failed to allocate dummy device. ret: %d\n", ret);
-+ goto dummy_dev_alloc_failed;
-+ }
-+
-+ /* Set EDMA jumbo MRU if enabled or set page mode. */
-+ if (edma_ctx->rx_buf_size) {
-+ edma_ctx->rx_page_mode = false;
-+ pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
-+ } else {
-+ edma_ctx->rx_page_mode = page_mode;
-+ }
-+
-+ ret = edma_alloc_rings();
-+ if (ret) {
-+ pr_err("Error in initializaing the rings. ret: %d\n", ret);
-+ goto edma_alloc_rings_failed;
-+ }
-+
-+ /* Disable interrupts. */
-+ edma_cfg_rx_disable_interrupts();
-+
-+ edma_cfg_rx_rings_disable();
-+
-+ edma_cfg_rx_ring_mappings();
-+
-+ ret = edma_cfg_rx_rings();
-+ if (ret) {
-+ pr_err("Error in configuring Rx rings. ret: %d\n", ret);
-+ goto edma_cfg_rx_rings_failed;
-+ }
-+
- /* Configure DMA request priority, DMA read burst length,
- * and AXI write size.
- */
-@@ -376,6 +484,10 @@ static int edma_hw_configure(void)
- data |= EDMA_MISC_TX_TIMEOUT_MASK;
- edma_ctx->intr_info.intr_mask_misc = data;
-
-+ edma_cfg_rx_rings_enable();
-+ edma_cfg_rx_napi_add();
-+ edma_cfg_rx_napi_enable();
-+
- /* Global EDMA enable and padding enable. */
- data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
-
-@@ -389,11 +501,32 @@ static int edma_hw_configure(void)
- if (ret) {
- pr_err("Failed to initialize unicast priority map table: %d\n",
- ret);
-- kfree(edma_ctx->netdev_arr);
-- return ret;
-+ goto configure_ucast_prio_map_tbl_failed;
-+ }
-+
-+ /* Initialize RPS hash map table. */
-+ ret = edma_cfg_rx_rps_hash_map();
-+ if (ret) {
-+ pr_err("Failed to configure rps hash table: %d\n",
-+ ret);
-+ goto edma_cfg_rx_rps_hash_map_failed;
- }
-
- return 0;
-+
-+edma_cfg_rx_rps_hash_map_failed:
-+configure_ucast_prio_map_tbl_failed:
-+ edma_cfg_rx_napi_disable();
-+ edma_cfg_rx_napi_delete();
-+ edma_cfg_rx_rings_disable();
-+edma_cfg_rx_rings_failed:
-+ edma_cfg_rx_rings_cleanup();
-+edma_alloc_rings_failed:
-+ free_netdev(edma_ctx->dummy_dev);
-+dummy_dev_alloc_failed:
-+ kfree(edma_ctx->netdev_arr);
-+
-+ return ret;
- }
-
- /**
-@@ -404,8 +537,31 @@ static int edma_hw_configure(void)
- */
- void edma_destroy(struct ppe_device *ppe_dev)
- {
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ /* Disable interrupts. */
-+ edma_cfg_rx_disable_interrupts();
-+
-+ /* Free IRQ for RXDESC rings. */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
-+ free_irq(edma_ctx->intr_info.intr_rx[i],
-+ (void *)&edma_ctx->rx_rings[i]);
-+ kfree(edma_rxdesc_irq_name[i]);
-+ }
-+ kfree(edma_rxdesc_irq_name);
-+
- kfree(edma_ctx->intr_info.intr_rx);
- kfree(edma_ctx->intr_info.intr_txcmpl);
-+
-+ edma_cfg_rx_napi_disable();
-+ edma_cfg_rx_napi_delete();
-+ edma_cfg_rx_rings_disable();
-+ edma_cfg_rx_rings_cleanup();
-+
-+ free_netdev(edma_ctx->dummy_dev);
- kfree(edma_ctx->netdev_arr);
- }
-
-@@ -428,6 +584,7 @@ int edma_setup(struct ppe_device *ppe_de
-
- edma_ctx->hw_info = &ipq9574_hw_info;
- edma_ctx->ppe_dev = ppe_dev;
-+ edma_ctx->rx_buf_size = rx_buff_size;
-
- /* Configure the EDMA common clocks. */
- ret = edma_clock_init();
-@@ -450,6 +607,16 @@ int edma_setup(struct ppe_device *ppe_de
- return ret;
- }
-
-+ ret = edma_irq_register();
-+ if (ret) {
-+ dev_err(dev, "Error in irq registration\n");
-+ kfree(edma_ctx->intr_info.intr_rx);
-+ kfree(edma_ctx->intr_info.intr_txcmpl);
-+ return ret;
-+ }
-+
-+ edma_cfg_rx_enable_interrupts();
-+
- dev_info(dev, "EDMA configuration successful\n");
-
- return 0;
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -6,6 +6,7 @@
- #define __EDMA_MAIN__
-
- #include "ppe_api.h"
-+#include "edma_rx.h"
-
- /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
- *
-@@ -29,6 +30,11 @@
- /* Interface ID start. */
- #define EDMA_START_IFNUM 1
-
-+#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
-+ typeof(_max) (max) = (_max); \
-+ ((((head) - (tail)) + \
-+ (max)) & ((max) - 1)); })
-+
- /**
- * struct edma_ring_info - EDMA ring data structure.
- * @max_rings: Maximum number of rings
-@@ -82,18 +88,28 @@ struct edma_intr_info {
- /**
- * struct edma_context - EDMA context.
- * @netdev_arr: Net device for each EDMA port
-+ * @dummy_dev: Dummy netdevice for RX DMA
- * @ppe_dev: PPE device
- * @hw_info: EDMA Hardware info
- * @intr_info: EDMA Interrupt info
-+ * @rxfill_rings: Rx fill Rings, SW is producer
-+ * @rx_rings: Rx Desc Rings, SW is consumer
-+ * @rx_page_mode: Page mode enabled or disabled
-+ * @rx_buf_size: Rx buffer size for Jumbo MRU
- */
- struct edma_context {
- struct net_device **netdev_arr;
-+ struct net_device *dummy_dev;
- struct ppe_device *ppe_dev;
- struct edma_hw_info *hw_info;
- struct edma_intr_info intr_info;
-+ struct edma_rxfill_ring *rxfill_rings;
-+ struct edma_rxdesc_ring *rx_rings;
-+ u32 rx_page_mode;
-+ u32 rx_buf_size;
- };
-
--/* Global EDMA context. */
-+/* Global EDMA context */
- extern struct edma_context *edma_ctx;
-
- void edma_destroy(struct ppe_device *ppe_dev);
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
-@@ -0,0 +1,964 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* Configure rings, Buffers and NAPI for receive path along with
-+ * providing APIs to enable, disable, clean and map the Rx rings.
-+ */
-+
-+#include <linux/cpumask.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/printk.h>
-+#include <linux/regmap.h>
-+#include <linux/skbuff.h>
-+
-+#include "edma.h"
-+#include "edma_cfg_rx.h"
-+#include "ppe.h"
-+#include "ppe_regs.h"
-+
-+/* EDMA Queue ID to Ring ID Table. */
-+#define EDMA_QID2RID_TABLE_MEM(q) (0xb9000 + (0x4 * (q)))
-+
-+/* Rx ring queue offset. */
-+#define EDMA_QUEUE_OFFSET(q_id) ((q_id) / EDMA_MAX_PRI_PER_CORE)
-+
-+/* Rx EDMA maximum queue supported. */
-+#define EDMA_CPU_PORT_QUEUE_MAX(queue_start) \
-+ ((queue_start) + (EDMA_MAX_PRI_PER_CORE * num_possible_cpus()) - 1)
-+
-+/* EDMA Queue ID to Ring ID configuration. */
-+#define EDMA_QID2RID_NUM_PER_REG 4
-+
-+int rx_queues[] = {0, 8, 16, 24};
-+
-+static u32 edma_rx_ring_queue_map[][EDMA_MAX_CORE] = {{ 0, 8, 16, 24 },
-+ { 1, 9, 17, 25 },
-+ { 2, 10, 18, 26 },
-+ { 3, 11, 19, 27 },
-+ { 4, 12, 20, 28 },
-+ { 5, 13, 21, 29 },
-+ { 6, 14, 22, 30 },
-+ { 7, 15, 23, 31 }};
-+
-+static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, ret;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev, rxdesc_ring->ring_id,
-+ ARRAY_SIZE(rx_queues), rx_queues);
-+ if (ret) {
-+ pr_err("Error in unmapping rxdesc ring %d to PPE queue mapping to disable its backpressure configuration\n",
-+ i);
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int edma_cfg_rx_desc_ring_reset_queue_priority(u32 rxdesc_ring_idx)
-+{
-+ u32 i, queue_id, ret;
-+
-+ for (i = 0; i < EDMA_MAX_PRI_PER_CORE; i++) {
-+ queue_id = edma_rx_ring_queue_map[i][rxdesc_ring_idx];
-+
-+ ret = ppe_queue_priority_set(edma_ctx->ppe_dev, queue_id, i);
-+ if (ret) {
-+ pr_err("Error in resetting %u queue's priority\n",
-+ queue_id);
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int edma_cfg_rx_desc_ring_reset_queue_config(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, ret;
-+
-+ if (unlikely(rx->num_rings > num_possible_cpus())) {
-+ pr_err("Invalid count of rxdesc rings: %d\n",
-+ rx->num_rings);
-+ return -EINVAL;
-+ }
-+
-+ /* Unmap Rxdesc ring to PPE queue mapping */
-+ ret = edma_cfg_rx_desc_rings_reset_queue_mapping();
-+ if (ret) {
-+ pr_err("Error in resetting Rx desc ring backpressure config\n");
-+ return ret;
-+ }
-+
-+ /* Reset the priority for PPE queues mapped to Rx rings */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ ret = edma_cfg_rx_desc_ring_reset_queue_priority(i);
-+ if (ret) {
-+ pr_err("Error in resetting ring:%d queue's priority\n",
-+ i + rx->ring_start);
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int edma_cfg_rx_desc_ring_to_queue_mapping(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+ int ret;
-+
-+ /* Rxdesc ring to PPE queue mapping */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev,
-+ rxdesc_ring->ring_id,
-+ ARRAY_SIZE(rx_queues), rx_queues);
-+ if (ret) {
-+ pr_err("Error in configuring Rx ring to PPE queue mapping, ret: %d, id: %d\n",
-+ ret, rxdesc_ring->ring_id);
-+ if (!edma_cfg_rx_desc_rings_reset_queue_mapping())
-+ pr_err("Error in resetting Rx desc ringbackpressure configurations\n");
-+
-+ return ret;
-+ }
-+
-+ pr_debug("Rx desc ring %d to PPE queue mapping for backpressure:\n",
-+ rxdesc_ring->ring_id);
-+ }
-+
-+ return 0;
-+}
-+
-+static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 data, reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_BA(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, (u32)(rxdesc_ring->pdma & EDMA_RXDESC_BA_MASK));
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PREHEADER_BA(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, (u32)(rxdesc_ring->sdma & EDMA_RXDESC_PREHEADER_BA_MASK));
-+
-+ data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
-+ data |= (EDMA_RXDESC_PL_DEFAULT_VALUE & EDMA_RXDESC_PL_OFFSET_MASK)
-+ << EDMA_RXDESC_PL_OFFSET_SHIFT;
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, data);
-+
-+ /* Configure the Mitigation timer */
-+ data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
-+ ppe_dev->clk_rate / MHZ);
-+ data = ((data & EDMA_RX_MOD_TIMER_INIT_MASK)
-+ << EDMA_RX_MOD_TIMER_INIT_SHIFT);
-+ pr_debug("EDMA Rx mitigation timer value: %d\n", data);
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RX_MOD_TIMER(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, data);
-+
-+ /* Configure the Mitigation packet count */
-+ data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
-+ << EDMA_RXDESC_LOW_THRE_SHIFT;
-+ pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, data);
-+
-+ /* Enable ring. Set ret mode to 'opaque'. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RX_INT_CTRL(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, EDMA_RX_NE_INT_EN);
-+}
-+
-+static void edma_cfg_rx_qid_to_rx_desc_ring_mapping(void)
-+{
-+ u32 desc_index, ring_index, reg_index, data, q_id;
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 mcast_start, mcast_end, reg;
-+ int ret;
-+
-+ desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
-+
-+ /* Here map all the queues to ring. */
-+ for (q_id = EDMA_RX_QUEUE_START;
-+ q_id <= EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START);
-+ q_id += EDMA_QID2RID_NUM_PER_REG) {
-+ reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
-+ ring_index = desc_index + EDMA_QUEUE_OFFSET(q_id);
-+
-+ data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, ring_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, ring_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, ring_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, ring_index);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
-+ regmap_write(regmap, reg, data);
-+ pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x, desc_index: %d, reg_index: %d\n",
-+ q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data, desc_index, reg_index);
-+ }
-+
-+ ret = ppe_edma_queue_resource_get(edma_ctx->ppe_dev, PPE_RES_MCAST,
-+ &mcast_start, &mcast_end);
-+ if (ret < 0) {
-+ pr_err("Error in extracting multicast queue values\n");
-+ return;
-+ }
-+
-+ /* Map multicast queues to the first Rx ring. */
-+ desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
-+ for (q_id = mcast_start; q_id <= mcast_end;
-+ q_id += EDMA_QID2RID_NUM_PER_REG) {
-+ reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
-+
-+ data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, desc_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, desc_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, desc_index);
-+ data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, desc_index);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
-+ regmap_write(regmap, reg, data);
-+
-+ pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x\n",
-+ q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data);
-+ }
-+}
-+
-+static void edma_cfg_rx_rings_to_rx_fill_mapping(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, data, reg;
-+
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR, 0);
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
-+ u32 data, reg, ring_id;
-+
-+ ring_id = rxdesc_ring->ring_id;
-+ if (ring_id >= 0 && ring_id <= 9)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
-+ else if (ring_id >= 10 && ring_id <= 19)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
-+ else
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
-+
-+ pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
-+ ring_id,
-+ rxdesc_ring->rxfill->ring_id);
-+
-+ /* Set the Rx fill ring number in the mapping register. */
-+ regmap_read(regmap, reg, &data);
-+ data |= (rxdesc_ring->rxfill->ring_id &
-+ EDMA_RXDESC2FILL_MAP_RXDESC_MASK) <<
-+ ((ring_id % 10) * 3);
-+ regmap_write(regmap, reg, data);
-+ }
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_RXDESC2FILL_MAP_0_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_RXDESC2FILL_MAP_1_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_RXDESC2FILL_MAP_2_ADDR: 0x%x\n", data);
-+}
-+
-+/**
-+ * edma_cfg_rx_rings_enable - Enable Rx and Rxfill rings
-+ *
-+ * Enable Rx and Rxfill rings.
-+ */
-+void edma_cfg_rx_rings_enable(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, reg;
-+
-+ /* Enable Rx rings */
-+ for (i = rx->ring_start; i < rx->ring_start + rx->num_rings; i++) {
-+ u32 data;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(i);
-+ regmap_read(regmap, reg, &data);
-+ data |= EDMA_RXDESC_RX_EN;
-+ regmap_write(regmap, reg, data);
-+ }
-+
-+ for (i = rxfill->ring_start; i < rxfill->ring_start + rxfill->num_rings; i++) {
-+ u32 data;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(i);
-+ regmap_read(regmap, reg, &data);
-+ data |= EDMA_RXFILL_RING_EN;
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_rings_disable - Disable Rx and Rxfill rings
-+ *
-+ * Disable Rx and Rxfill rings.
-+ */
-+void edma_cfg_rx_rings_disable(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, reg;
-+
-+ /* Disable Rx rings */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring = NULL;
-+ u32 data;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(rxdesc_ring->ring_id);
-+ regmap_read(regmap, reg, &data);
-+ data &= ~EDMA_RXDESC_RX_EN;
-+ regmap_write(regmap, reg, data);
-+ }
-+
-+ /* Disable RxFill Rings */
-+ for (i = 0; i < rxfill->num_rings; i++) {
-+ struct edma_rxfill_ring *rxfill_ring = NULL;
-+ u32 data;
-+
-+ rxfill_ring = &edma_ctx->rxfill_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(rxfill_ring->ring_id);
-+ regmap_read(regmap, reg, &data);
-+ data &= ~EDMA_RXFILL_RING_EN;
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_mappings - Setup RX ring mapping
-+ *
-+ * Setup queue ID to Rx desc ring mapping.
-+ */
-+void edma_cfg_rx_ring_mappings(void)
-+{
-+ edma_cfg_rx_qid_to_rx_desc_ring_mapping();
-+ edma_cfg_rx_rings_to_rx_fill_mapping();
-+}
-+
-+static void edma_cfg_rx_fill_ring_cleanup(struct edma_rxfill_ring *rxfill_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct device *dev = ppe_dev->dev;
-+ u16 cons_idx, curr_idx;
-+ u32 data, reg;
-+
-+ /* Get RxFill ring producer index */
-+ curr_idx = rxfill_ring->prod_idx & EDMA_RXFILL_PROD_IDX_MASK;
-+
-+ /* Get RxFill ring consumer index */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->ring_id);
-+ regmap_read(regmap, reg, &data);
-+ cons_idx = data & EDMA_RXFILL_CONS_IDX_MASK;
-+
-+ while (curr_idx != cons_idx) {
-+ struct edma_rxfill_desc *rxfill_desc;
-+ struct sk_buff *skb;
-+
-+ /* Get RxFill descriptor */
-+ rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx);
-+
-+ cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
-+
-+ /* Get skb from opaque */
-+ skb = (struct sk_buff *)EDMA_RXFILL_OPAQUE_GET(rxfill_desc);
-+ if (unlikely(!skb)) {
-+ pr_err("Empty skb reference at index:%d\n",
-+ cons_idx);
-+ continue;
-+ }
-+
-+ dev_kfree_skb_any(skb);
-+ }
-+
-+ /* Free RxFill ring descriptors */
-+ dma_free_coherent(dev, (sizeof(struct edma_rxfill_desc)
-+ * rxfill_ring->count),
-+ rxfill_ring->desc, rxfill_ring->dma);
-+ rxfill_ring->desc = NULL;
-+ rxfill_ring->dma = (dma_addr_t)0;
-+}
-+
-+static int edma_cfg_rx_fill_ring_dma_alloc(struct edma_rxfill_ring *rxfill_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+
-+ /* Allocate RxFill ring descriptors */
-+ rxfill_ring->desc = dma_alloc_coherent(dev, (sizeof(struct edma_rxfill_desc)
-+ * rxfill_ring->count),
-+ &rxfill_ring->dma,
-+ GFP_KERNEL | __GFP_ZERO);
-+ if (unlikely(!rxfill_ring->desc))
-+ return -ENOMEM;
-+
-+ return 0;
-+}
-+
-+static int edma_cfg_rx_desc_ring_dma_alloc(struct edma_rxdesc_ring *rxdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+
-+ rxdesc_ring->pdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_pri)
-+ * rxdesc_ring->count),
-+ &rxdesc_ring->pdma, GFP_KERNEL | __GFP_ZERO);
-+ if (unlikely(!rxdesc_ring->pdesc))
-+ return -ENOMEM;
-+
-+ rxdesc_ring->sdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_sec)
-+ * rxdesc_ring->count),
-+ &rxdesc_ring->sdma, GFP_KERNEL | __GFP_ZERO);
-+ if (unlikely(!rxdesc_ring->sdesc)) {
-+ dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
-+ * rxdesc_ring->count),
-+ rxdesc_ring->pdesc,
-+ rxdesc_ring->pdma);
-+ rxdesc_ring->pdesc = NULL;
-+ rxdesc_ring->pdma = (dma_addr_t)0;
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static void edma_cfg_rx_desc_ring_cleanup(struct edma_rxdesc_ring *rxdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct device *dev = ppe_dev->dev;
-+ u32 prod_idx, cons_idx, reg;
-+
-+ /* Get Rxdesc consumer & producer indices */
-+ cons_idx = rxdesc_ring->cons_idx & EDMA_RXDESC_CONS_IDX_MASK;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
-+ regmap_read(regmap, reg, &prod_idx);
-+ prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
-+
-+ /* Free any buffers assigned to any descriptors */
-+ while (cons_idx != prod_idx) {
-+ struct edma_rxdesc_pri *rxdesc_pri =
-+ EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
-+ struct sk_buff *skb;
-+
-+ /* Update consumer index */
-+ cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
-+
-+ /* Get opaque from Rxdesc */
-+ skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(rxdesc_pri);
-+ if (unlikely(!skb)) {
-+ pr_warn("Empty skb reference at index:%d\n",
-+ cons_idx);
-+ continue;
-+ }
-+
-+ dev_kfree_skb_any(skb);
-+ }
-+
-+ /* Update the consumer index */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, cons_idx);
-+
-+ /* Free Rxdesc ring descriptor */
-+ dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
-+ * rxdesc_ring->count), rxdesc_ring->pdesc,
-+ rxdesc_ring->pdma);
-+ rxdesc_ring->pdesc = NULL;
-+ rxdesc_ring->pdma = (dma_addr_t)0;
-+
-+ /* Free any buffers assigned to any secondary ring descriptors */
-+ dma_free_coherent(dev, (sizeof(struct edma_rxdesc_sec)
-+ * rxdesc_ring->count), rxdesc_ring->sdesc,
-+ rxdesc_ring->sdma);
-+ rxdesc_ring->sdesc = NULL;
-+ rxdesc_ring->sdma = (dma_addr_t)0;
-+}
-+
-+static int edma_cfg_rx_rings_setup(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 ring_idx, alloc_size, buf_len;
-+
-+ /* Set buffer allocation size */
-+ if (edma_ctx->rx_buf_size) {
-+ alloc_size = edma_ctx->rx_buf_size +
-+ EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
-+ buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
-+ } else if (edma_ctx->rx_page_mode) {
-+ alloc_size = EDMA_RX_PAGE_MODE_SKB_SIZE +
-+ EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
-+ buf_len = PAGE_SIZE;
-+ } else {
-+ alloc_size = EDMA_RX_BUFFER_SIZE;
-+ buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
-+ }
-+
-+ pr_debug("EDMA ctx:%p rx_ring alloc_size=%d, buf_len=%d\n",
-+ edma_ctx, alloc_size, buf_len);
-+
-+ /* Allocate Rx fill ring descriptors */
-+ for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++) {
-+ u32 ret;
-+ struct edma_rxfill_ring *rxfill_ring = NULL;
-+
-+ rxfill_ring = &edma_ctx->rxfill_rings[ring_idx];
-+ rxfill_ring->count = EDMA_RX_RING_SIZE;
-+ rxfill_ring->ring_id = rxfill->ring_start + ring_idx;
-+ rxfill_ring->alloc_size = alloc_size;
-+ rxfill_ring->buf_len = buf_len;
-+ rxfill_ring->page_mode = edma_ctx->rx_page_mode;
-+
-+ ret = edma_cfg_rx_fill_ring_dma_alloc(rxfill_ring);
-+ if (ret) {
-+ pr_err("Error in setting up %d rxfill ring. ret: %d",
-+ rxfill_ring->ring_id, ret);
-+ while (--ring_idx >= 0)
-+ edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
-+
-+ return -ENOMEM;
-+ }
-+ }
-+
-+ /* Allocate RxDesc ring descriptors */
-+ for (ring_idx = 0; ring_idx < rx->num_rings; ring_idx++) {
-+ u32 index, queue_id = EDMA_RX_QUEUE_START;
-+ struct edma_rxdesc_ring *rxdesc_ring = NULL;
-+ u32 ret;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[ring_idx];
-+ rxdesc_ring->count = EDMA_RX_RING_SIZE;
-+ rxdesc_ring->ring_id = rx->ring_start + ring_idx;
-+
-+ if (queue_id > EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START)) {
-+ pr_err("Invalid queue_id: %d\n", queue_id);
-+ while (--ring_idx >= 0)
-+ edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
-+
-+ goto rxdesc_mem_alloc_fail;
-+ }
-+
-+ /* Create a mapping between RX Desc ring and Rx fill ring.
-+ * Number of fill rings are lesser than the descriptor rings
-+ * Share the fill rings across descriptor rings.
-+ */
-+ index = rxfill->ring_start +
-+ (ring_idx % rxfill->num_rings);
-+ rxdesc_ring->rxfill = &edma_ctx->rxfill_rings[index
-+ - rxfill->ring_start];
-+
-+ ret = edma_cfg_rx_desc_ring_dma_alloc(rxdesc_ring);
-+ if (ret) {
-+ pr_err("Error in setting up %d rxdesc ring. ret: %d",
-+ rxdesc_ring->ring_id, ret);
-+ while (--ring_idx >= 0)
-+ edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
-+
-+ goto rxdesc_mem_alloc_fail;
-+ }
-+ }
-+
-+ pr_debug("Rx descriptor count for Rx desc and Rx fill rings : %d\n",
-+ EDMA_RX_RING_SIZE);
-+
-+ return 0;
-+
-+rxdesc_mem_alloc_fail:
-+ for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++)
-+ edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
-+
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * edma_cfg_rx_buff_size_setup - Configure EDMA Rx jumbo buffer
-+ *
-+ * Configure EDMA Rx jumbo buffer
-+ */
-+void edma_cfg_rx_buff_size_setup(void)
-+{
-+ if (edma_ctx->rx_buf_size) {
-+ edma_ctx->rx_page_mode = false;
-+ pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_rings_alloc - Allocate EDMA Rx rings
-+ *
-+ * Allocate EDMA Rx rings.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+int edma_cfg_rx_rings_alloc(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ int ret;
-+
-+ edma_ctx->rxfill_rings = kzalloc((sizeof(*edma_ctx->rxfill_rings) *
-+ rxfill->num_rings),
-+ GFP_KERNEL);
-+ if (!edma_ctx->rxfill_rings)
-+ return -ENOMEM;
-+
-+ edma_ctx->rx_rings = kzalloc((sizeof(*edma_ctx->rx_rings) *
-+ rx->num_rings),
-+ GFP_KERNEL);
-+ if (!edma_ctx->rx_rings)
-+ goto rxdesc_ring_alloc_fail;
-+
-+ pr_debug("RxDesc:%u rx (%u-%u) RxFill:%u (%u-%u)\n",
-+ rx->num_rings, rx->ring_start,
-+ (rx->ring_start + rx->num_rings - 1),
-+ rxfill->num_rings, rxfill->ring_start,
-+ (rxfill->ring_start + rxfill->num_rings - 1));
-+
-+ if (edma_cfg_rx_rings_setup()) {
-+ pr_err("Error in setting up Rx rings\n");
-+ goto rx_rings_setup_fail;
-+ }
-+
-+ /* Reset Rx descriptor ring mapped queue's configurations */
-+ ret = edma_cfg_rx_desc_ring_reset_queue_config();
-+ if (ret) {
-+ pr_err("Error in resetting the Rx descriptor rings configurations\n");
-+ edma_cfg_rx_rings_cleanup();
-+ return ret;
-+ }
-+
-+ return 0;
-+
-+rx_rings_setup_fail:
-+ kfree(edma_ctx->rx_rings);
-+ edma_ctx->rx_rings = NULL;
-+rxdesc_ring_alloc_fail:
-+ kfree(edma_ctx->rxfill_rings);
-+ edma_ctx->rxfill_rings = NULL;
-+
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * edma_cfg_rx_rings_cleanup - Cleanup EDMA Rx rings
-+ *
-+ * Cleanup EDMA Rx rings
-+ */
-+void edma_cfg_rx_rings_cleanup(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ /* Free RxFill ring descriptors */
-+ for (i = 0; i < rxfill->num_rings; i++)
-+ edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[i]);
-+
-+ /* Free Rx completion ring descriptors */
-+ for (i = 0; i < rx->num_rings; i++)
-+ edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[i]);
-+
-+ kfree(edma_ctx->rxfill_rings);
-+ kfree(edma_ctx->rx_rings);
-+ edma_ctx->rxfill_rings = NULL;
-+ edma_ctx->rx_rings = NULL;
-+}
-+
-+static void edma_cfg_rx_fill_ring_configure(struct edma_rxfill_ring *rxfill_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 ring_sz, reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_BA(rxfill_ring->ring_id);
-+ regmap_write(regmap, reg, (u32)(rxfill_ring->dma & EDMA_RING_DMA_MASK));
-+
-+ ring_sz = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->ring_id);
-+ regmap_write(regmap, reg, ring_sz);
-+
-+ edma_rx_alloc_buffer(rxfill_ring, rxfill_ring->count - 1);
-+}
-+
-+static void edma_cfg_rx_desc_ring_flow_control(u32 threshold_xoff, u32 threshold_xon)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 data, i, reg;
-+
-+ data = (threshold_xoff & EDMA_RXDESC_FC_XOFF_THRE_MASK) << EDMA_RXDESC_FC_XOFF_THRE_SHIFT;
-+ data |= ((threshold_xon & EDMA_RXDESC_FC_XON_THRE_MASK) << EDMA_RXDESC_FC_XON_THRE_SHIFT);
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_FC_THRE(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+static void edma_cfg_rx_fill_ring_flow_control(int threshold_xoff, int threshold_xon)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 data, i, reg;
-+
-+ data = (threshold_xoff & EDMA_RXFILL_FC_XOFF_THRE_MASK) << EDMA_RXFILL_FC_XOFF_THRE_SHIFT;
-+ data |= ((threshold_xon & EDMA_RXFILL_FC_XON_THRE_MASK) << EDMA_RXFILL_FC_XON_THRE_SHIFT);
-+
-+ for (i = 0; i < rxfill->num_rings; i++) {
-+ struct edma_rxfill_ring *rxfill_ring;
-+
-+ rxfill_ring = &edma_ctx->rxfill_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_FC_THRE(rxfill_ring->ring_id);
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_rings - Configure EDMA Rx rings.
-+ *
-+ * Configure EDMA Rx rings.
-+ */
-+int edma_cfg_rx_rings(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ for (i = 0; i < rxfill->num_rings; i++)
-+ edma_cfg_rx_fill_ring_configure(&edma_ctx->rxfill_rings[i]);
-+
-+ for (i = 0; i < rx->num_rings; i++)
-+ edma_cfg_rx_desc_ring_configure(&edma_ctx->rx_rings[i]);
-+
-+ /* Configure Rx flow control configurations */
-+ edma_cfg_rx_desc_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
-+ edma_cfg_rx_fill_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
-+
-+ return edma_cfg_rx_desc_ring_to_queue_mapping();
-+}
-+
-+/**
-+ * edma_cfg_rx_disable_interrupts - EDMA disable RX interrupts
-+ *
-+ * Disable RX interrupt masks
-+ */
-+void edma_cfg_rx_disable_interrupts(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, reg;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring =
-+ &edma_ctx->rx_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_enable_interrupts - EDMA enable RX interrupts
-+ *
-+ * Enable RX interrupt masks
-+ */
-+void edma_cfg_rx_enable_interrupts(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i, reg;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring =
-+ &edma_ctx->rx_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_napi_disable - Disable NAPI for Rx
-+ *
-+ * Disable NAPI for Rx
-+ */
-+void edma_cfg_rx_napi_disable(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ if (!rxdesc_ring->napi_added)
-+ continue;
-+
-+ napi_disable(&rxdesc_ring->napi);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_napi_enable - Enable NAPI for Rx
-+ *
-+ * Enable NAPI for Rx
-+ */
-+void edma_cfg_rx_napi_enable(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ if (!rxdesc_ring->napi_added)
-+ continue;
-+
-+ napi_enable(&rxdesc_ring->napi);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_rx_napi_delete - Delete Rx NAPI
-+ *
-+ * Delete RX NAPI
-+ */
-+void edma_cfg_rx_napi_delete(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ if (!rxdesc_ring->napi_added)
-+ continue;
-+
-+ netif_napi_del(&rxdesc_ring->napi);
-+ rxdesc_ring->napi_added = false;
-+ }
-+}
-+
-+/* Add Rx NAPI */
-+/**
-+ * edma_cfg_rx_napi_add - Add Rx NAPI
-+ * @netdev: Netdevice
-+ *
-+ * Add RX NAPI
-+ */
-+void edma_cfg_rx_napi_add(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ u32 i;
-+
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
-+
-+ netif_napi_add_weight(edma_ctx->dummy_dev, &rxdesc_ring->napi,
-+ edma_rx_napi_poll, hw_info->napi_budget_rx);
-+ rxdesc_ring->napi_added = true;
-+ }
-+
-+ netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
-+}
-+
-+/**
-+ * edma_cfg_rx_rps_hash_map - Configure rx rps hash map.
-+ *
-+ * Initialize and configure RPS hash map for queues
-+ */
-+int edma_cfg_rx_rps_hash_map(void)
-+{
-+ cpumask_t edma_rps_cpumask = {{EDMA_RX_DEFAULT_BITMAP}};
-+ int map_len = 0, idx = 0, ret = 0;
-+ u32 q_off = EDMA_RX_QUEUE_START;
-+ u32 q_map[EDMA_MAX_CORE] = {0};
-+ u32 hash, cpu;
-+
-+ /* Map all possible hash values to queues used by the EDMA Rx
-+ * rings based on a bitmask, which represents the cores to be mapped.
-+ * These queues are expected to be mapped to different Rx rings
-+ * which are assigned to different cores using IRQ affinity configuration.
-+ */
-+ for_each_cpu(cpu, &edma_rps_cpumask) {
-+ q_map[map_len] = q_off + (cpu * EDMA_MAX_PRI_PER_CORE);
-+ map_len++;
-+ }
-+
-+ for (hash = 0; hash < PPE_QUEUE_HASH_NUM; hash++) {
-+ ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
-+ PPE_QUEUE_CLASS_HASH, hash, q_map[idx]);
-+ if (ret)
-+ return ret;
-+
-+ pr_debug("profile_id: %u, hash: %u, q_off: %u\n",
-+ EDMA_CPU_PORT_PROFILE_ID, hash, q_map[idx]);
-+ idx = (idx + 1) % map_len;
-+ }
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
-@@ -0,0 +1,48 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_CFG_RX__
-+#define __EDMA_CFG_RX__
-+
-+/* SKB payload size used in page mode */
-+#define EDMA_RX_PAGE_MODE_SKB_SIZE 256
-+
-+/* Rx flow control X-OFF default value */
-+#define EDMA_RX_FC_XOFF_DEF 32
-+
-+/* Rx flow control X-ON default value */
-+#define EDMA_RX_FC_XON_DEF 64
-+
-+/* Rx AC flow control original threshold */
-+#define EDMA_RX_AC_FC_THRE_ORIG 0x190
-+
-+/* Rx AC flow control default threshold */
-+#define EDMA_RX_AC_FC_THRES_DEF 0x104
-+/* Rx mitigation timer's default value in microseconds */
-+#define EDMA_RX_MITIGATION_TIMER_DEF 25
-+
-+/* Rx mitigation packet count's default value */
-+#define EDMA_RX_MITIGATION_PKT_CNT_DEF 16
-+
-+/* Default bitmap of cores for RPS to ARM cores */
-+#define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
-+
-+int edma_cfg_rx_rings(void);
-+int edma_cfg_rx_rings_alloc(void);
-+void edma_cfg_rx_ring_mappings(void);
-+void edma_cfg_rx_rings_cleanup(void);
-+void edma_cfg_rx_disable_interrupts(void);
-+void edma_cfg_rx_enable_interrupts(void);
-+void edma_cfg_rx_napi_disable(void);
-+void edma_cfg_rx_napi_enable(void);
-+void edma_cfg_rx_napi_delete(void);
-+void edma_cfg_rx_napi_add(void);
-+void edma_cfg_rx_mapping(void);
-+void edma_cfg_rx_rings_enable(void);
-+void edma_cfg_rx_rings_disable(void);
-+void edma_cfg_rx_buff_size_setup(void);
-+int edma_cfg_rx_rps_hash_map(void);
-+int edma_cfg_rx_rps(struct ctl_table *table, int write,
-+ void *buffer, size_t *lenp, loff_t *ppos);
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-@@ -12,12 +12,39 @@
- #include <linux/printk.h>
-
- #include "edma.h"
-+#include "edma_cfg_rx.h"
- #include "edma_port.h"
- #include "ppe_regs.h"
-
- /* Number of netdev queues. */
- #define EDMA_NETDEV_QUEUE_NUM 4
-
-+static int edma_port_stats_alloc(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ /* Allocate per-cpu stats memory. */
-+ port_priv->pcpu_stats.rx_stats =
-+ netdev_alloc_pcpu_stats(struct edma_port_rx_stats);
-+ if (!port_priv->pcpu_stats.rx_stats) {
-+ netdev_err(netdev, "Per-cpu EDMA Rx stats alloc failed for %s\n",
-+ netdev->name);
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static void edma_port_stats_free(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+
-+ free_percpu(port_priv->pcpu_stats.rx_stats);
-+}
-+
- static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
- __maybe_unused struct sk_buff *skb,
- __maybe_unused struct net_device *sb_dev)
-@@ -172,6 +199,7 @@ void edma_port_destroy(struct ppe_port *
- int port_id = port->port_id;
- struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
-
-+ edma_port_stats_free(netdev);
- unregister_netdev(netdev);
- free_netdev(netdev);
- ppe_port_phylink_destroy(port);
-@@ -232,6 +260,13 @@ int edma_port_setup(struct ppe_port *por
- port_id, netdev->dev_addr);
- }
-
-+ /* Allocate memory for EDMA port statistics. */
-+ ret = edma_port_stats_alloc(netdev);
-+ if (ret) {
-+ netdev_dbg(netdev, "EDMA port stats alloc failed\n");
-+ goto stats_alloc_fail;
-+ }
-+
- netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
- netdev->name, port_id);
-
-@@ -263,8 +298,10 @@ int edma_port_setup(struct ppe_port *por
- register_netdev_fail:
- ppe_port_phylink_destroy(port);
- port_phylink_setup_fail:
-- free_netdev(netdev);
- edma_ctx->netdev_arr[port_id - 1] = NULL;
-+ edma_port_stats_free(netdev);
-+stats_alloc_fail:
-+ free_netdev(netdev);
-
- return ret;
- }
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
-@@ -15,14 +15,45 @@
- | NETIF_F_TSO6)
-
- /**
-+ * struct edma_port_rx_stats - EDMA RX per CPU stats for the port.
-+ * @rx_pkts: Number of Rx packets
-+ * @rx_bytes: Number of Rx bytes
-+ * @rx_drops: Number of Rx drops
-+ * @rx_nr_frag_pkts: Number of Rx nr_frags packets
-+ * @rx_fraglist_pkts: Number of Rx fraglist packets
-+ * @rx_nr_frag_headroom_err: nr_frags headroom error packets
-+ * @syncp: Synchronization pointer
-+ */
-+struct edma_port_rx_stats {
-+ u64 rx_pkts;
-+ u64 rx_bytes;
-+ u64 rx_drops;
-+ u64 rx_nr_frag_pkts;
-+ u64 rx_fraglist_pkts;
-+ u64 rx_nr_frag_headroom_err;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
-+ * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
-+ * @rx_stats: Per CPU Rx statistics
-+ */
-+struct edma_port_pcpu_stats {
-+ struct edma_port_rx_stats __percpu *rx_stats;
-+};
-+
-+/**
- * struct edma_port_priv - EDMA port priv structure.
- * @ppe_port: Pointer to PPE port
- * @netdev: Corresponding netdevice
-+ * @pcpu_stats: Per CPU netdev statistics
-+ * @txr_map: Tx ring per-core mapping
- * @flags: Feature flags
- */
- struct edma_port_priv {
- struct ppe_port *ppe_port;
- struct net_device *netdev;
-+ struct edma_port_pcpu_stats pcpu_stats;
- unsigned long flags;
- };
-
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.c
-@@ -0,0 +1,622 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* Provides APIs to alloc Rx Buffers, reap the buffers, receive and
-+ * process linear and Scatter Gather packets.
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include <linux/etherdevice.h>
-+#include <linux/irqreturn.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/platform_device.h>
-+#include <linux/printk.h>
-+#include <linux/regmap.h>
-+
-+#include "edma.h"
-+#include "edma_cfg_rx.h"
-+#include "edma_port.h"
-+#include "ppe.h"
-+#include "ppe_regs.h"
-+
-+static int edma_rx_alloc_buffer_list(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
-+{
-+ struct edma_rxfill_stats *rxfill_stats = &rxfill_ring->rxfill_stats;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ u32 rx_alloc_size = rxfill_ring->alloc_size;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ bool page_mode = rxfill_ring->page_mode;
-+ struct edma_rxfill_desc *rxfill_desc;
-+ u32 buf_len = rxfill_ring->buf_len;
-+ struct device *dev = ppe_dev->dev;
-+ u16 prod_idx, start_idx;
-+ u16 num_alloc = 0;
-+ u32 reg;
-+
-+ prod_idx = rxfill_ring->prod_idx;
-+ start_idx = prod_idx;
-+
-+ while (likely(alloc_count--)) {
-+ dma_addr_t buff_addr;
-+ struct sk_buff *skb;
-+ struct page *pg;
-+
-+ rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, prod_idx);
-+
-+ skb = dev_alloc_skb(rx_alloc_size);
-+ if (unlikely(!skb)) {
-+ u64_stats_update_begin(&rxfill_stats->syncp);
-+ ++rxfill_stats->alloc_failed;
-+ u64_stats_update_end(&rxfill_stats->syncp);
-+ break;
-+ }
-+
-+ skb_reserve(skb, EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN);
-+
-+ if (likely(!page_mode)) {
-+ buff_addr = dma_map_single(dev, skb->data, rx_alloc_size, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, buff_addr)) {
-+ dev_dbg(dev, "edma_context:%p Unable to dma for non page mode",
-+ edma_ctx);
-+ dev_kfree_skb_any(skb);
-+ break;
-+ }
-+ } else {
-+ pg = alloc_page(GFP_ATOMIC);
-+ if (unlikely(!pg)) {
-+ u64_stats_update_begin(&rxfill_stats->syncp);
-+ ++rxfill_stats->page_alloc_failed;
-+ u64_stats_update_end(&rxfill_stats->syncp);
-+ dev_kfree_skb_any(skb);
-+ dev_dbg(dev, "edma_context:%p Unable to allocate page",
-+ edma_ctx);
-+ break;
-+ }
-+
-+ buff_addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, buff_addr)) {
-+ dev_dbg(dev, "edma_context:%p Mapping error for page mode",
-+ edma_ctx);
-+ __free_page(pg);
-+ dev_kfree_skb_any(skb);
-+ break;
-+ }
-+
-+ skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
-+ }
-+
-+ EDMA_RXFILL_BUFFER_ADDR_SET(rxfill_desc, buff_addr);
-+
-+ EDMA_RXFILL_OPAQUE_LO_SET(rxfill_desc, skb);
-+#ifdef __LP64__
-+ EDMA_RXFILL_OPAQUE_HI_SET(rxfill_desc, skb);
-+#endif
-+ EDMA_RXFILL_PACKET_LEN_SET(rxfill_desc,
-+ (u32)(buf_len) & EDMA_RXFILL_BUF_SIZE_MASK);
-+ prod_idx = (prod_idx + 1) & EDMA_RX_RING_SIZE_MASK;
-+ num_alloc++;
-+ }
-+
-+ if (likely(num_alloc)) {
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->ring_id);
-+ regmap_write(regmap, reg, prod_idx);
-+ rxfill_ring->prod_idx = prod_idx;
-+ }
-+
-+ return num_alloc;
-+}
-+
-+/**
-+ * edma_rx_alloc_buffer - EDMA Rx alloc buffer.
-+ * @rxfill_ring: EDMA Rxfill ring
-+ * @alloc_count: Number of rings to alloc
-+ *
-+ * Alloc Rx buffers for RxFill ring.
-+ *
-+ * Return the number of rings allocated.
-+ */
-+int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
-+{
-+ return edma_rx_alloc_buffer_list(rxfill_ring, alloc_count);
-+}
-+
-+/* Mark ip_summed appropriately in the skb as per the L3/L4 checksum
-+ * status in descriptor.
-+ */
-+static void edma_rx_checksum_verify(struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ u8 pid = EDMA_RXDESC_PID_GET(rxdesc_pri);
-+
-+ skb_checksum_none_assert(skb);
-+
-+ if (likely(EDMA_RX_PID_IS_IPV4(pid))) {
-+ if (likely(EDMA_RXDESC_L3CSUM_STATUS_GET(rxdesc_pri)) &&
-+ likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ } else if (likely(EDMA_RX_PID_IS_IPV6(pid))) {
-+ if (likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ }
-+}
-+
-+static void edma_rx_process_last_segment(struct edma_rxdesc_ring *rxdesc_ring,
-+ struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ bool page_mode = rxdesc_ring->rxfill->page_mode;
-+ struct edma_port_pcpu_stats *pcpu_stats;
-+ struct edma_port_rx_stats *rx_stats;
-+ struct edma_port_priv *port_dev;
-+ struct sk_buff *skb_head;
-+ struct net_device *dev;
-+ u32 pkt_length;
-+
-+ /* Get packet length. */
-+ pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
-+
-+ skb_head = rxdesc_ring->head;
-+ dev = skb_head->dev;
-+
-+ /* Check Rx checksum offload status. */
-+ if (likely(dev->features & NETIF_F_RXCSUM))
-+ edma_rx_checksum_verify(rxdesc_pri, skb_head);
-+
-+ /* Get stats for the netdevice. */
-+ port_dev = netdev_priv(dev);
-+ pcpu_stats = &port_dev->pcpu_stats;
-+ rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
-+
-+ if (unlikely(page_mode)) {
-+ if (unlikely(!pskb_may_pull(skb_head, ETH_HLEN))) {
-+ /* Discard the SKB that we have been building,
-+ * in addition to the SKB linked to current descriptor.
-+ */
-+ dev_kfree_skb_any(skb_head);
-+ rxdesc_ring->head = NULL;
-+ rxdesc_ring->last = NULL;
-+ rxdesc_ring->pdesc_head = NULL;
-+
-+ u64_stats_update_begin(&rx_stats->syncp);
-+ rx_stats->rx_nr_frag_headroom_err++;
-+ u64_stats_update_end(&rx_stats->syncp);
-+
-+ return;
-+ }
-+ }
-+
-+ if (unlikely(!pskb_pull(skb_head, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_ring->pdesc_head)))) {
-+ dev_kfree_skb_any(skb_head);
-+ rxdesc_ring->head = NULL;
-+ rxdesc_ring->last = NULL;
-+ rxdesc_ring->pdesc_head = NULL;
-+
-+ u64_stats_update_begin(&rx_stats->syncp);
-+ rx_stats->rx_nr_frag_headroom_err++;
-+ u64_stats_update_end(&rx_stats->syncp);
-+
-+ return;
-+ }
-+
-+ u64_stats_update_begin(&rx_stats->syncp);
-+ rx_stats->rx_pkts++;
-+ rx_stats->rx_bytes += skb_head->len;
-+ rx_stats->rx_nr_frag_pkts += (u64)page_mode;
-+ rx_stats->rx_fraglist_pkts += (u64)(!page_mode);
-+ u64_stats_update_end(&rx_stats->syncp);
-+
-+ pr_debug("edma_context:%p skb:%p Jumbo pkt_length:%u\n",
-+ edma_ctx, skb_head, skb_head->len);
-+
-+ skb_head->protocol = eth_type_trans(skb_head, dev);
-+
-+ /* Send packet up the stack. */
-+ if (dev->features & NETIF_F_GRO)
-+ napi_gro_receive(&rxdesc_ring->napi, skb_head);
-+ else
-+ netif_receive_skb(skb_head);
-+
-+ rxdesc_ring->head = NULL;
-+ rxdesc_ring->last = NULL;
-+ rxdesc_ring->pdesc_head = NULL;
-+}
-+
-+static void edma_rx_handle_frag_list(struct edma_rxdesc_ring *rxdesc_ring,
-+ struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ u32 pkt_length;
-+
-+ /* Get packet length. */
-+ pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
-+ pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
-+ edma_ctx, skb, pkt_length);
-+
-+ if (!(rxdesc_ring->head)) {
-+ skb_put(skb, pkt_length);
-+ rxdesc_ring->head = skb;
-+ rxdesc_ring->last = NULL;
-+ rxdesc_ring->pdesc_head = rxdesc_pri;
-+
-+ return;
-+ }
-+
-+ /* Append it to the fraglist of head if this is second frame
-+ * If not second frame append to tail.
-+ */
-+ skb_put(skb, pkt_length);
-+ if (!skb_has_frag_list(rxdesc_ring->head))
-+ skb_shinfo(rxdesc_ring->head)->frag_list = skb;
-+ else
-+ rxdesc_ring->last->next = skb;
-+
-+ rxdesc_ring->last = skb;
-+ rxdesc_ring->last->next = NULL;
-+ rxdesc_ring->head->len += pkt_length;
-+ rxdesc_ring->head->data_len += pkt_length;
-+ rxdesc_ring->head->truesize += skb->truesize;
-+
-+ /* If there are more segments for this packet,
-+ * then we have nothing to do. Otherwise process
-+ * last segment and send packet to stack.
-+ */
-+ if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
-+ return;
-+
-+ edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
-+}
-+
-+static void edma_rx_handle_nr_frags(struct edma_rxdesc_ring *rxdesc_ring,
-+ struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ skb_frag_t *frag = NULL;
-+ u32 pkt_length;
-+
-+ /* Get packet length. */
-+ pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
-+ pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
-+ edma_ctx, skb, pkt_length);
-+
-+ if (!(rxdesc_ring->head)) {
-+ skb->len = pkt_length;
-+ skb->data_len = pkt_length;
-+ skb->truesize = SKB_TRUESIZE(PAGE_SIZE);
-+ rxdesc_ring->head = skb;
-+ rxdesc_ring->last = NULL;
-+ rxdesc_ring->pdesc_head = rxdesc_pri;
-+
-+ return;
-+ }
-+
-+ frag = &skb_shinfo(skb)->frags[0];
-+
-+ /* Append current frag at correct index as nr_frag of parent. */
-+ skb_add_rx_frag(rxdesc_ring->head, skb_shinfo(rxdesc_ring->head)->nr_frags,
-+ skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
-+ skb_shinfo(skb)->nr_frags = 0;
-+
-+ /* Free the SKB after we have appended its frag page to the head skb. */
-+ dev_kfree_skb_any(skb);
-+
-+ /* If there are more segments for this packet,
-+ * then we have nothing to do. Otherwise process
-+ * last segment and send packet to stack.
-+ */
-+ if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
-+ return;
-+
-+ edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
-+}
-+
-+static bool edma_rx_handle_linear_packets(struct edma_rxdesc_ring *rxdesc_ring,
-+ struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ bool page_mode = rxdesc_ring->rxfill->page_mode;
-+ struct edma_port_pcpu_stats *pcpu_stats;
-+ struct edma_port_rx_stats *rx_stats;
-+ struct edma_port_priv *port_dev;
-+ skb_frag_t *frag = NULL;
-+ u32 pkt_length;
-+
-+ /* Get stats for the netdevice. */
-+ port_dev = netdev_priv(skb->dev);
-+ pcpu_stats = &port_dev->pcpu_stats;
-+ rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
-+
-+ /* Get packet length. */
-+ pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
-+
-+ if (likely(!page_mode)) {
-+ skb_put(skb, pkt_length);
-+ goto send_to_stack;
-+ }
-+
-+ /* Handle linear packet in page mode. */
-+ frag = &skb_shinfo(skb)->frags[0];
-+ skb_add_rx_frag(skb, 0, skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
-+
-+ /* Pull ethernet header into SKB data area for header processing. */
-+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
-+ u64_stats_update_begin(&rx_stats->syncp);
-+ rx_stats->rx_nr_frag_headroom_err++;
-+ u64_stats_update_end(&rx_stats->syncp);
-+ dev_kfree_skb_any(skb);
-+
-+ return false;
-+ }
-+
-+send_to_stack:
-+
-+ __skb_pull(skb, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_pri));
-+
-+ /* Check Rx checksum offload status. */
-+ if (likely(skb->dev->features & NETIF_F_RXCSUM))
-+ edma_rx_checksum_verify(rxdesc_pri, skb);
-+
-+ u64_stats_update_begin(&rx_stats->syncp);
-+ rx_stats->rx_pkts++;
-+ rx_stats->rx_bytes += pkt_length;
-+ rx_stats->rx_nr_frag_pkts += (u64)page_mode;
-+ u64_stats_update_end(&rx_stats->syncp);
-+
-+ skb->protocol = eth_type_trans(skb, skb->dev);
-+ if (skb->dev->features & NETIF_F_GRO)
-+ napi_gro_receive(&rxdesc_ring->napi, skb);
-+ else
-+ netif_receive_skb(skb);
-+
-+ netdev_dbg(skb->dev, "edma_context:%p, skb:%p pkt_length:%u\n",
-+ edma_ctx, skb, skb->len);
-+
-+ return true;
-+}
-+
-+static struct net_device *edma_rx_get_src_dev(struct edma_rxdesc_stats *rxdesc_stats,
-+ struct edma_rxdesc_pri *rxdesc_pri,
-+ struct sk_buff *skb)
-+{
-+ u32 src_info = EDMA_RXDESC_SRC_INFO_GET(rxdesc_pri);
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct net_device *ndev = NULL;
-+ u8 src_port_num;
-+
-+ /* Check src_info. */
-+ if (likely((src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK)
-+ == EDMA_RXDESC_SRCINFO_TYPE_PORTID)) {
-+ src_port_num = src_info & EDMA_RXDESC_PORTNUM_BITS;
-+ } else {
-+ if (net_ratelimit()) {
-+ pr_warn("Invalid src info_type:0x%x. Drop skb:%p\n",
-+ (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK), skb);
-+ }
-+
-+ u64_stats_update_begin(&rxdesc_stats->syncp);
-+ ++rxdesc_stats->src_port_inval_type;
-+ u64_stats_update_end(&rxdesc_stats->syncp);
-+
-+ return NULL;
-+ }
-+
-+ /* Packet with PP source. */
-+ if (likely(src_port_num <= hw_info->max_ports)) {
-+ if (unlikely(src_port_num < EDMA_START_IFNUM)) {
-+ if (net_ratelimit())
-+ pr_warn("Port number error :%d. Drop skb:%p\n",
-+ src_port_num, skb);
-+
-+ u64_stats_update_begin(&rxdesc_stats->syncp);
-+ ++rxdesc_stats->src_port_inval;
-+ u64_stats_update_end(&rxdesc_stats->syncp);
-+
-+ return NULL;
-+ }
-+
-+ /* Get netdev for this port using the source port
-+ * number as index into the netdev array. We need to
-+ * subtract one since the indices start form '0' and
-+ * port numbers start from '1'.
-+ */
-+ ndev = edma_ctx->netdev_arr[src_port_num - 1];
-+ }
-+
-+ if (likely(ndev))
-+ return ndev;
-+
-+ if (net_ratelimit())
-+ pr_warn("Netdev Null src_info_type:0x%x src port num:%d Drop skb:%p\n",
-+ (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK),
-+ src_port_num, skb);
-+
-+ u64_stats_update_begin(&rxdesc_stats->syncp);
-+ ++rxdesc_stats->src_port_inval_netdev;
-+ u64_stats_update_end(&rxdesc_stats->syncp);
-+
-+ return NULL;
-+}
-+
-+static int edma_rx_reap(struct edma_rxdesc_ring *rxdesc_ring, int budget)
-+{
-+ struct edma_rxdesc_stats *rxdesc_stats = &rxdesc_ring->rxdesc_stats;
-+ u32 alloc_size = rxdesc_ring->rxfill->alloc_size;
-+ bool page_mode = rxdesc_ring->rxfill->page_mode;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct edma_rxdesc_pri *next_rxdesc_pri;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct device *dev = ppe_dev->dev;
-+ u32 prod_idx, cons_idx, end_idx;
-+ u32 work_to_do, work_done = 0;
-+ struct sk_buff *next_skb;
-+ u32 work_leftover, reg;
-+
-+ /* Get Rx ring producer and consumer indices. */
-+ cons_idx = rxdesc_ring->cons_idx;
-+
-+ if (likely(rxdesc_ring->work_leftover > EDMA_RX_MAX_PROCESS)) {
-+ work_to_do = rxdesc_ring->work_leftover;
-+ } else {
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
-+ regmap_read(regmap, reg, &prod_idx);
-+ prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
-+ work_to_do = EDMA_DESC_AVAIL_COUNT(prod_idx,
-+ cons_idx, EDMA_RX_RING_SIZE);
-+ rxdesc_ring->work_leftover = work_to_do;
-+ }
-+
-+ if (work_to_do > budget)
-+ work_to_do = budget;
-+
-+ rxdesc_ring->work_leftover -= work_to_do;
-+ end_idx = (cons_idx + work_to_do) & EDMA_RX_RING_SIZE_MASK;
-+ next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
-+
-+ /* Get opaque from RXDESC. */
-+ next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
-+
-+ work_leftover = work_to_do & (EDMA_RX_MAX_PROCESS - 1);
-+ while (likely(work_to_do--)) {
-+ struct edma_rxdesc_pri *rxdesc_pri;
-+ struct net_device *ndev;
-+ struct sk_buff *skb;
-+ dma_addr_t dma_addr;
-+
-+ skb = next_skb;
-+ rxdesc_pri = next_rxdesc_pri;
-+ dma_addr = EDMA_RXDESC_BUFFER_ADDR_GET(rxdesc_pri);
-+
-+ if (!page_mode)
-+ dma_unmap_single(dev, dma_addr, alloc_size,
-+ DMA_TO_DEVICE);
-+ else
-+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
-+
-+ /* Update consumer index. */
-+ cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
-+
-+ /* Get the next Rx descriptor. */
-+ next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
-+
-+ /* Handle linear packets or initial segments first. */
-+ if (likely(!(rxdesc_ring->head))) {
-+ ndev = edma_rx_get_src_dev(rxdesc_stats, rxdesc_pri, skb);
-+ if (unlikely(!ndev)) {
-+ dev_kfree_skb_any(skb);
-+ goto next_rx_desc;
-+ }
-+
-+ /* Update skb fields for head skb. */
-+ skb->dev = ndev;
-+ skb->skb_iif = ndev->ifindex;
-+
-+ /* Handle linear packets. */
-+ if (likely(!EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))) {
-+ next_skb =
-+ (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
-+
-+ if (unlikely(!
-+ edma_rx_handle_linear_packets(rxdesc_ring,
-+ rxdesc_pri, skb)))
-+ dev_kfree_skb_any(skb);
-+
-+ goto next_rx_desc;
-+ }
-+ }
-+
-+ next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
-+
-+ /* Handle scatter frame processing for first/middle/last segments. */
-+ page_mode ? edma_rx_handle_nr_frags(rxdesc_ring, rxdesc_pri, skb) :
-+ edma_rx_handle_frag_list(rxdesc_ring, rxdesc_pri, skb);
-+
-+next_rx_desc:
-+ /* Update work done. */
-+ work_done++;
-+
-+ /* Check if we can refill EDMA_RX_MAX_PROCESS worth buffers,
-+ * if yes, refill and update index before continuing.
-+ */
-+ if (unlikely(!(work_done & (EDMA_RX_MAX_PROCESS - 1)))) {
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, cons_idx);
-+ rxdesc_ring->cons_idx = cons_idx;
-+ edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, EDMA_RX_MAX_PROCESS);
-+ }
-+ }
-+
-+ /* Check if we need to refill and update
-+ * index for any buffers before exit.
-+ */
-+ if (unlikely(work_leftover)) {
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, cons_idx);
-+ rxdesc_ring->cons_idx = cons_idx;
-+ edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, work_leftover);
-+ }
-+
-+ return work_done;
-+}
-+
-+/**
-+ * edma_rx_napi_poll - EDMA Rx napi poll.
-+ * @napi: NAPI structure
-+ * @budget: Rx NAPI budget
-+ *
-+ * EDMA RX NAPI handler to handle the NAPI poll.
-+ *
-+ * Return the number of packets processed.
-+ */
-+int edma_rx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+ struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)napi;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ int work_done = 0;
-+ u32 status, reg;
-+
-+ do {
-+ work_done += edma_rx_reap(rxdesc_ring, budget - work_done);
-+ if (likely(work_done >= budget))
-+ return work_done;
-+
-+ /* Check if there are more packets to process. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->ring_id);
-+ regmap_read(regmap, reg, &status);
-+ status = status & EDMA_RXDESC_RING_INT_STATUS_MASK;
-+ } while (likely(status));
-+
-+ napi_complete(napi);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
-+
-+ return work_done;
-+}
-+
-+/**
-+ * edma_rx_handle_irq - EDMA Rx handle irq.
-+ * @irq: Interrupt to handle
-+ * @ctx: Context
-+ *
-+ * Process RX IRQ and schedule NAPI.
-+ *
-+ * Return IRQ_HANDLED(1) on success.
-+ */
-+irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
-+{
-+ struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)ctx;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 reg;
-+
-+ if (likely(napi_schedule_prep(&rxdesc_ring->napi))) {
-+ /* Disable RxDesc interrupt. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
-+ regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
-+ __napi_schedule(&rxdesc_ring->napi);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
-@@ -0,0 +1,287 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_RX__
-+#define __EDMA_RX__
-+
-+#include <linux/netdevice.h>
-+
-+#define EDMA_RXFILL_RING_PER_CORE_MAX 1
-+#define EDMA_RXDESC_RING_PER_CORE_MAX 1
-+
-+/* Max Rx processing without replenishing RxFill ring. */
-+#define EDMA_RX_MAX_PROCESS 32
-+
-+#define EDMA_RX_SKB_HEADROOM 128
-+#define EDMA_RX_QUEUE_START 0
-+#define EDMA_RX_BUFFER_SIZE 1984
-+#define EDMA_MAX_CORE 4
-+
-+#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[(i)]))
-+#define EDMA_GET_PDESC(R, i, type) (&(((type *)((R)->pdesc))[(i)]))
-+#define EDMA_GET_SDESC(R, i, type) (&(((type *)((R)->sdesc))[(i)]))
-+#define EDMA_RXFILL_DESC(R, i) EDMA_GET_DESC(R, i, \
-+ struct edma_rxfill_desc)
-+#define EDMA_RXDESC_PRI_DESC(R, i) EDMA_GET_PDESC(R, i, \
-+ struct edma_rxdesc_pri)
-+#define EDMA_RXDESC_SEC_DESC(R, i) EDMA_GET_SDESC(R, i, \
-+ struct edma_rxdesc_sec)
-+
-+#define EDMA_RX_RING_SIZE 2048
-+
-+#define EDMA_RX_RING_SIZE_MASK (EDMA_RX_RING_SIZE - 1)
-+#define EDMA_RX_RING_ID_MASK 0x1F
-+
-+#define EDMA_MAX_PRI_PER_CORE 8
-+#define EDMA_RX_PID_IPV4_MAX 0x3
-+#define EDMA_RX_PID_IPV6 0x4
-+#define EDMA_RX_PID_IS_IPV4(pid) (!((pid) & (~EDMA_RX_PID_IPV4_MAX)))
-+#define EDMA_RX_PID_IS_IPV6(pid) (!(!((pid) & EDMA_RX_PID_IPV6)))
-+
-+#define EDMA_RXDESC_BUFFER_ADDR_GET(desc) \
-+ ((u32)(le32_to_cpu((__force __le32)((desc)->word0))))
-+#define EDMA_RXDESC_OPAQUE_GET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ ((uintptr_t)((u64)((desc)->word2) | \
-+ ((u64)((desc)->word3) << 0x20))); })
-+
-+#define EDMA_RXDESC_SRCINFO_TYPE_PORTID 0x2000
-+#define EDMA_RXDESC_SRCINFO_TYPE_MASK 0xF000
-+#define EDMA_RXDESC_L3CSUM_STATUS_MASK BIT(13)
-+#define EDMA_RXDESC_L4CSUM_STATUS_MASK BIT(12)
-+#define EDMA_RXDESC_PORTNUM_BITS 0x0FFF
-+
-+#define EDMA_RXDESC_PACKET_LEN_MASK 0x3FFFF
-+#define EDMA_RXDESC_PACKET_LEN_GET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ ((le32_to_cpu((__force __le32)((desc)->word5))) & \
-+ EDMA_RXDESC_PACKET_LEN_MASK); })
-+
-+#define EDMA_RXDESC_MORE_BIT_MASK 0x40000000
-+#define EDMA_RXDESC_MORE_BIT_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word1))) & \
-+ EDMA_RXDESC_MORE_BIT_MASK)
-+#define EDMA_RXDESC_SRC_DST_INFO_GET(desc) \
-+ ((u32)((le32_to_cpu((__force __le32)((desc)->word4)))))
-+
-+#define EDMA_RXDESC_L3_OFFSET_MASK GENMASK(23, 16)
-+#define EDMA_RXDESC_L3_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_L3_OFFSET_MASK, \
-+ le32_to_cpu((__force __le32)((desc)->word7)))
-+
-+#define EDMA_RXDESC_PID_MASK GENMASK(15, 12)
-+#define EDMA_RXDESC_PID_GET(desc) FIELD_GET(EDMA_RXDESC_PID_MASK, \
-+ le32_to_cpu((__force __le32)((desc)->word7)))
-+
-+#define EDMA_RXDESC_DST_INFO_MASK GENMASK(31, 16)
-+#define EDMA_RXDESC_DST_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_DST_INFO_MASK, \
-+ le32_to_cpu((__force __le32)((desc)->word4)))
-+
-+#define EDMA_RXDESC_SRC_INFO_MASK GENMASK(15, 0)
-+#define EDMA_RXDESC_SRC_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_SRC_INFO_MASK, \
-+ le32_to_cpu((__force __le32)((desc)->word4)))
-+
-+#define EDMA_RXDESC_PORT_ID_MASK GENMASK(11, 0)
-+#define EDMA_RXDESC_PORT_ID_GET(x) FIELD_GET(EDMA_RXDESC_PORT_ID_MASK, x)
-+
-+#define EDMA_RXDESC_SRC_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
-+ (EDMA_RXDESC_SRC_INFO_GET(desc)))
-+#define EDMA_RXDESC_DST_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
-+ (EDMA_RXDESC_DST_INFO_GET(desc)))
-+
-+#define EDMA_RXDESC_DST_PORT (0x2 << EDMA_RXDESC_PID_SHIFT)
-+
-+#define EDMA_RXDESC_L3CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L3CSUM_STATUS_MASK, \
-+ le32_to_cpu((__force __le32)(desc)->word6))
-+#define EDMA_RXDESC_L4CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L4CSUM_STATUS_MASK, \
-+ le32_to_cpu((__force __le32)(desc)->word6))
-+
-+#define EDMA_RXDESC_DATA_OFFSET_MASK GENMASK(11, 0)
-+#define EDMA_RXDESC_DATA_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_DATA_OFFSET_MASK, \
-+ le32_to_cpu((__force __le32)(desc)->word6))
-+
-+#define EDMA_RXFILL_BUF_SIZE_MASK 0xFFFF
-+#define EDMA_RXFILL_BUF_SIZE_SHIFT 16
-+
-+/* Opaque values are not accessed by the EDMA HW,
-+ * so endianness conversion is not needed.
-+ */
-+
-+#define EDMA_RXFILL_OPAQUE_LO_SET(desc, ptr) (((desc)->word2) = \
-+ (u32)(uintptr_t)(ptr))
-+#ifdef __LP64__
-+#define EDMA_RXFILL_OPAQUE_HI_SET(desc, ptr) (((desc)->word3) = \
-+ (u32)((u64)(ptr) >> 0x20))
-+#endif
-+
-+#define EDMA_RXFILL_OPAQUE_GET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ ((uintptr_t)((u64)((desc)->word2) | \
-+ ((u64)((desc)->word3) << 0x20))); })
-+
-+#define EDMA_RXFILL_PACKET_LEN_SET(desc, len) { \
-+ (((desc)->word1) = (u32)((((u32)len) << EDMA_RXFILL_BUF_SIZE_SHIFT) & \
-+ 0xFFFF0000)); \
-+}
-+
-+#define EDMA_RXFILL_BUFFER_ADDR_SET(desc, addr) (((desc)->word0) = (u32)(addr))
-+
-+/* Opaque values are set in word2 and word3, they are not accessed by the EDMA HW,
-+ * so endianness conversion is not needed.
-+ */
-+#define EDMA_RXFILL_ENDIAN_SET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ cpu_to_le32s(&((desc)->word0)); \
-+ cpu_to_le32s(&((desc)->word1)); \
-+})
-+
-+/* RX DESC size shift to obtain index from descriptor pointer. */
-+#define EDMA_RXDESC_SIZE_SHIFT 5
-+
-+/**
-+ * struct edma_rxdesc_stats - RX descriptor ring stats.
-+ * @src_port_inval: Invalid source port number
-+ * @src_port_inval_type: Source type is not PORT ID
-+ * @src_port_inval_netdev: Invalid net device for the source port
-+ * @syncp: Synchronization pointer
-+ */
-+struct edma_rxdesc_stats {
-+ u64 src_port_inval;
-+ u64 src_port_inval_type;
-+ u64 src_port_inval_netdev;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
-+ * struct edma_rxfill_stats - Rx fill descriptor ring stats.
-+ * @alloc_failed: Buffer allocation failure count
-+ * @page_alloc_failed: Page allocation failure count for page mode
-+ * @syncp: Synchronization pointer
-+ */
-+struct edma_rxfill_stats {
-+ u64 alloc_failed;
-+ u64 page_alloc_failed;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
-+ * struct edma_rxdesc_pri - Rx descriptor.
-+ * @word0: Buffer address
-+ * @word1: More bit, priority bit, service code
-+ * @word2: Opaque low bits
-+ * @word3: Opaque high bits
-+ * @word4: Destination and source information
-+ * @word5: WiFi QoS, data length
-+ * @word6: Hash value, check sum status
-+ * @word7: DSCP, packet offsets
-+ */
-+struct edma_rxdesc_pri {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+ u32 word4;
-+ u32 word5;
-+ u32 word6;
-+ u32 word7;
-+};
-+
-+ /**
-+ * struct edma_rxdesc_sec - Rx secondary descriptor.
-+ * @word0: Timestamp
-+ * @word1: Secondary checksum status
-+ * @word2: QoS tag
-+ * @word3: Flow index details
-+ * @word4: Secondary packet offsets
-+ * @word5: Multicast bit, checksum
-+ * @word6: SVLAN, CVLAN
-+ * @word7: Secondary SVLAN, CVLAN
-+ */
-+struct edma_rxdesc_sec {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+ u32 word4;
-+ u32 word5;
-+ u32 word6;
-+ u32 word7;
-+};
-+
-+/**
-+ * struct edma_rxfill_desc - RxFill descriptor.
-+ * @word0: Buffer address
-+ * @word1: Buffer size
-+ * @word2: Opaque low bits
-+ * @word3: Opaque high bits
-+ */
-+struct edma_rxfill_desc {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+};
-+
-+/**
-+ * struct edma_rxfill_ring - RxFill ring
-+ * @ring_id: RxFill ring number
-+ * @count: Number of descriptors in the ring
-+ * @prod_idx: Ring producer index
-+ * @alloc_size: Buffer size to allocate
-+ * @desc: Descriptor ring virtual address
-+ * @dma: Descriptor ring physical address
-+ * @buf_len: Buffer length for rxfill descriptor
-+ * @page_mode: Page mode for Rx processing
-+ * @rx_fill_stats: Rx fill ring statistics
-+ */
-+struct edma_rxfill_ring {
-+ u32 ring_id;
-+ u32 count;
-+ u32 prod_idx;
-+ u32 alloc_size;
-+ struct edma_rxfill_desc *desc;
-+ dma_addr_t dma;
-+ u32 buf_len;
-+ bool page_mode;
-+ struct edma_rxfill_stats rxfill_stats;
-+};
-+
-+/**
-+ * struct edma_rxdesc_ring - RxDesc ring
-+ * @napi: Pointer to napi
-+ * @ring_id: Rxdesc ring number
-+ * @count: Number of descriptors in the ring
-+ * @work_leftover: Leftover descriptors to be processed
-+ * @cons_idx: Ring consumer index
-+ * @pdesc: Primary descriptor ring virtual address
-+ * @pdesc_head: Primary descriptor head in case of scatter-gather frame
-+ * @sdesc: Secondary descriptor ring virtual address
-+ * @rxdesc_stats: Rx descriptor ring statistics
-+ * @rxfill: RxFill ring used
-+ * @napi_added: Flag to indicate NAPI add status
-+ * @pdma: Primary descriptor ring physical address
-+ * @sdma: Secondary descriptor ring physical address
-+ * @head: Head of the skb list in case of scatter-gather frame
-+ * @last: Last skb of the skb list in case of scatter-gather frame
-+ */
-+struct edma_rxdesc_ring {
-+ struct napi_struct napi;
-+ u32 ring_id;
-+ u32 count;
-+ u32 work_leftover;
-+ u32 cons_idx;
-+ struct edma_rxdesc_pri *pdesc;
-+ struct edma_rxdesc_pri *pdesc_head;
-+ struct edma_rxdesc_sec *sdesc;
-+ struct edma_rxdesc_stats rxdesc_stats;
-+ struct edma_rxfill_ring *rxfill;
-+ bool napi_added;
-+ dma_addr_t pdma;
-+ dma_addr_t sdma;
-+ struct sk_buff *head;
-+ struct sk_buff *last;
-+};
-+
-+irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
-+int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
-+int edma_rx_napi_poll(struct napi_struct *napi, int budget);
-+#endif
+++ /dev/null
-From 1c2736afc17435d3bca18a84f9ed2620a5b03830 Mon Sep 17 00:00:00 2001
-From: Suruchi Agarwal <quic_suruchia@quicinc.com>
-Date: Thu, 21 Mar 2024 16:26:29 -0700
-Subject: [PATCH 41/50] net: ethernet: qualcomm: Add Tx Ethernet DMA support
-
-Add Tx queues, rings, descriptors configurations and
-DMA support for the EDMA.
-
-Change-Id: Idfb0e1fe5ac494d614097d6c97dd15d63bbce8e6
-Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.c | 97 ++-
- drivers/net/ethernet/qualcomm/ppe/edma.h | 7 +
- .../net/ethernet/qualcomm/ppe/edma_cfg_tx.c | 648 ++++++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_tx.h | 28 +
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 136 +++
- drivers/net/ethernet/qualcomm/ppe/edma_port.h | 35 +
- drivers/net/ethernet/qualcomm/ppe/edma_tx.c | 808 ++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_tx.h | 302 +++++++
- 9 files changed, 2055 insertions(+), 8 deletions(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_tx.c
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_tx.h
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-
- #EDMA
--qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
-\ No newline at end of file
-+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
---- a/drivers/net/ethernet/qualcomm/ppe/edma.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -18,6 +18,7 @@
- #include <linux/reset.h>
-
- #include "edma.h"
-+#include "edma_cfg_tx.h"
- #include "edma_cfg_rx.h"
- #include "ppe_regs.h"
-
-@@ -25,6 +26,7 @@
-
- /* Global EDMA context. */
- struct edma_context *edma_ctx;
-+static char **edma_txcmpl_irq_name;
- static char **edma_rxdesc_irq_name;
-
- /* Module params. */
-@@ -192,22 +194,59 @@ static int edma_configure_ucast_prio_map
- static int edma_irq_register(void)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
- struct edma_ring_info *rx = hw_info->rx;
- int ret;
- u32 i;
-
-+ /* Request IRQ for TXCMPL rings. */
-+ edma_txcmpl_irq_name = kzalloc((sizeof(char *) * txcmpl->num_rings), GFP_KERNEL);
-+ if (!edma_txcmpl_irq_name)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ edma_txcmpl_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
-+ GFP_KERNEL);
-+ if (!edma_txcmpl_irq_name[i]) {
-+ ret = -ENOMEM;
-+ goto txcmpl_ring_irq_name_alloc_fail;
-+ }
-+
-+ snprintf(edma_txcmpl_irq_name[i], EDMA_IRQ_NAME_SIZE, "edma_txcmpl_%d",
-+ txcmpl->ring_start + i);
-+
-+ irq_set_status_flags(edma_ctx->intr_info.intr_txcmpl[i], IRQ_DISABLE_UNLAZY);
-+
-+ ret = request_irq(edma_ctx->intr_info.intr_txcmpl[i],
-+ edma_tx_handle_irq, IRQF_SHARED,
-+ edma_txcmpl_irq_name[i],
-+ (void *)&edma_ctx->txcmpl_rings[i]);
-+ if (ret) {
-+ pr_err("TXCMPL ring IRQ:%d request %d failed\n",
-+ edma_ctx->intr_info.intr_txcmpl[i], i);
-+ goto txcmpl_ring_intr_req_fail;
-+ }
-+
-+ pr_debug("TXCMPL ring: %d IRQ:%d request success: %s\n",
-+ txcmpl->ring_start + i,
-+ edma_ctx->intr_info.intr_txcmpl[i],
-+ edma_txcmpl_irq_name[i]);
-+ }
-+
- /* Request IRQ for RXDESC rings. */
- edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
- GFP_KERNEL);
-- if (!edma_rxdesc_irq_name)
-- return -ENOMEM;
-+ if (!edma_rxdesc_irq_name) {
-+ ret = -ENOMEM;
-+ goto rxdesc_irq_name_alloc_fail;
-+ }
-
- for (i = 0; i < rx->num_rings; i++) {
- edma_rxdesc_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
- GFP_KERNEL);
- if (!edma_rxdesc_irq_name[i]) {
- ret = -ENOMEM;
-- goto rxdesc_irq_name_alloc_fail;
-+ goto rxdesc_ring_irq_name_alloc_fail;
- }
-
- snprintf(edma_rxdesc_irq_name[i], 20, "edma_rxdesc_%d",
-@@ -236,8 +275,19 @@ static int edma_irq_register(void)
- rx_desc_ring_intr_req_fail:
- for (i = 0; i < rx->num_rings; i++)
- kfree(edma_rxdesc_irq_name[i]);
--rxdesc_irq_name_alloc_fail:
-+rxdesc_ring_irq_name_alloc_fail:
- kfree(edma_rxdesc_irq_name);
-+rxdesc_irq_name_alloc_fail:
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ synchronize_irq(edma_ctx->intr_info.intr_txcmpl[i]);
-+ free_irq(edma_ctx->intr_info.intr_txcmpl[i],
-+ (void *)&edma_ctx->txcmpl_rings[i]);
-+ }
-+txcmpl_ring_intr_req_fail:
-+ for (i = 0; i < txcmpl->num_rings; i++)
-+ kfree(edma_txcmpl_irq_name[i]);
-+txcmpl_ring_irq_name_alloc_fail:
-+ kfree(edma_txcmpl_irq_name);
-
- return ret;
- }
-@@ -326,12 +376,22 @@ static int edma_irq_init(void)
-
- static int edma_alloc_rings(void)
- {
-+ if (edma_cfg_tx_rings_alloc()) {
-+ pr_err("Error in allocating Tx rings\n");
-+ return -ENOMEM;
-+ }
-+
- if (edma_cfg_rx_rings_alloc()) {
- pr_err("Error in allocating Rx rings\n");
-- return -ENOMEM;
-+ goto rx_rings_alloc_fail;
- }
-
- return 0;
-+
-+rx_rings_alloc_fail:
-+ edma_cfg_tx_rings_cleanup();
-+
-+ return -ENOMEM;
- }
-
- static int edma_hw_reset(void)
-@@ -389,7 +449,7 @@ static int edma_hw_configure(void)
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
- struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- struct regmap *regmap = ppe_dev->regmap;
-- u32 data, reg;
-+ u32 data, reg, i;
- int ret;
-
- reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
-@@ -439,11 +499,17 @@ static int edma_hw_configure(void)
- }
-
- /* Disable interrupts. */
-+ for (i = 1; i <= hw_info->max_ports; i++)
-+ edma_cfg_tx_disable_interrupts(i);
-+
- edma_cfg_rx_disable_interrupts();
-
- edma_cfg_rx_rings_disable();
-
- edma_cfg_rx_ring_mappings();
-+ edma_cfg_tx_ring_mappings();
-+
-+ edma_cfg_tx_rings();
-
- ret = edma_cfg_rx_rings();
- if (ret) {
-@@ -520,6 +586,7 @@ configure_ucast_prio_map_tbl_failed:
- edma_cfg_rx_napi_delete();
- edma_cfg_rx_rings_disable();
- edma_cfg_rx_rings_failed:
-+ edma_cfg_tx_rings_cleanup();
- edma_cfg_rx_rings_cleanup();
- edma_alloc_rings_failed:
- free_netdev(edma_ctx->dummy_dev);
-@@ -538,13 +605,27 @@ dummy_dev_alloc_failed:
- void edma_destroy(struct ppe_device *ppe_dev)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
- struct edma_ring_info *rx = hw_info->rx;
- u32 i;
-
- /* Disable interrupts. */
-+ for (i = 1; i <= hw_info->max_ports; i++)
-+ edma_cfg_tx_disable_interrupts(i);
-+
- edma_cfg_rx_disable_interrupts();
-
-- /* Free IRQ for RXDESC rings. */
-+ /* Free IRQ for TXCMPL rings. */
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ synchronize_irq(edma_ctx->intr_info.intr_txcmpl[i]);
-+
-+ free_irq(edma_ctx->intr_info.intr_txcmpl[i],
-+ (void *)&edma_ctx->txcmpl_rings[i]);
-+ kfree(edma_txcmpl_irq_name[i]);
-+ }
-+ kfree(edma_txcmpl_irq_name);
-+
-+ /* Free IRQ for RXDESC rings */
- for (i = 0; i < rx->num_rings; i++) {
- synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
- free_irq(edma_ctx->intr_info.intr_rx[i],
-@@ -560,6 +641,7 @@ void edma_destroy(struct ppe_device *ppe
- edma_cfg_rx_napi_delete();
- edma_cfg_rx_rings_disable();
- edma_cfg_rx_rings_cleanup();
-+ edma_cfg_tx_rings_cleanup();
-
- free_netdev(edma_ctx->dummy_dev);
- kfree(edma_ctx->netdev_arr);
-@@ -585,6 +667,7 @@ int edma_setup(struct ppe_device *ppe_de
- edma_ctx->hw_info = &ipq9574_hw_info;
- edma_ctx->ppe_dev = ppe_dev;
- edma_ctx->rx_buf_size = rx_buff_size;
-+ edma_ctx->tx_requeue_stop = false;
-
- /* Configure the EDMA common clocks. */
- ret = edma_clock_init();
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -7,6 +7,7 @@
-
- #include "ppe_api.h"
- #include "edma_rx.h"
-+#include "edma_tx.h"
-
- /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
- *
-@@ -94,8 +95,11 @@ struct edma_intr_info {
- * @intr_info: EDMA Interrupt info
- * @rxfill_rings: Rx fill Rings, SW is producer
- * @rx_rings: Rx Desc Rings, SW is consumer
-+ * @tx_rings: Tx Descriptor Ring, SW is producer
-+ * @txcmpl_rings: Tx complete Ring, SW is consumer
- * @rx_page_mode: Page mode enabled or disabled
- * @rx_buf_size: Rx buffer size for Jumbo MRU
-+ * @tx_requeue_stop: Tx requeue stop enabled or disabled
- */
- struct edma_context {
- struct net_device **netdev_arr;
-@@ -105,8 +109,11 @@ struct edma_context {
- struct edma_intr_info intr_info;
- struct edma_rxfill_ring *rxfill_rings;
- struct edma_rxdesc_ring *rx_rings;
-+ struct edma_txdesc_ring *tx_rings;
-+ struct edma_txcmpl_ring *txcmpl_rings;
- u32 rx_page_mode;
- u32 rx_buf_size;
-+ bool tx_requeue_stop;
- };
-
- /* Global EDMA context */
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
-@@ -0,0 +1,648 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* Configure rings, Buffers and NAPI for transmit path along with
-+ * providing APIs to enable, disable, clean and map the Tx rings.
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/printk.h>
-+#include <linux/regmap.h>
-+#include <linux/skbuff.h>
-+
-+#include "edma.h"
-+#include "edma_cfg_tx.h"
-+#include "edma_port.h"
-+#include "ppe.h"
-+#include "ppe_regs.h"
-+
-+static void edma_cfg_txcmpl_ring_cleanup(struct edma_txcmpl_ring *txcmpl_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+
-+ /* Free any buffers assigned to any descriptors. */
-+ edma_tx_complete(EDMA_TX_RING_SIZE - 1, txcmpl_ring);
-+
-+ /* Free TxCmpl ring descriptors. */
-+ dma_free_coherent(dev, sizeof(struct edma_txcmpl_desc)
-+ * txcmpl_ring->count, txcmpl_ring->desc,
-+ txcmpl_ring->dma);
-+ txcmpl_ring->desc = NULL;
-+ txcmpl_ring->dma = (dma_addr_t)0;
-+}
-+
-+static int edma_cfg_txcmpl_ring_setup(struct edma_txcmpl_ring *txcmpl_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+
-+ /* Allocate RxFill ring descriptors. */
-+ txcmpl_ring->desc = dma_alloc_coherent(dev, sizeof(struct edma_txcmpl_desc)
-+ * txcmpl_ring->count,
-+ &txcmpl_ring->dma,
-+ GFP_KERNEL | __GFP_ZERO);
-+
-+ if (unlikely(!txcmpl_ring->desc))
-+ return -ENOMEM;
-+
-+ return 0;
-+}
-+
-+static void edma_cfg_tx_desc_ring_cleanup(struct edma_txdesc_ring *txdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txdesc_pri *txdesc = NULL;
-+ struct device *dev = ppe_dev->dev;
-+ u32 prod_idx, cons_idx, data, reg;
-+ struct sk_buff *skb = NULL;
-+
-+ /* Free any buffers assigned to any descriptors. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ prod_idx = data & EDMA_TXDESC_PROD_IDX_MASK;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK;
-+
-+ /* Walk active list, obtain skb from descriptor and free it. */
-+ while (cons_idx != prod_idx) {
-+ txdesc = EDMA_TXDESC_PRI_DESC(txdesc_ring, cons_idx);
-+ skb = (struct sk_buff *)EDMA_TXDESC_OPAQUE_GET(txdesc);
-+ dev_kfree_skb_any(skb);
-+
-+ cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
-+ }
-+
-+ /* Free Tx ring descriptors. */
-+ dma_free_coherent(dev, (sizeof(struct edma_txdesc_pri)
-+ * txdesc_ring->count),
-+ txdesc_ring->pdesc,
-+ txdesc_ring->pdma);
-+ txdesc_ring->pdesc = NULL;
-+ txdesc_ring->pdma = (dma_addr_t)0;
-+
-+ /* Free any buffers assigned to any secondary descriptors. */
-+ dma_free_coherent(dev, (sizeof(struct edma_txdesc_sec)
-+ * txdesc_ring->count),
-+ txdesc_ring->sdesc,
-+ txdesc_ring->sdma);
-+ txdesc_ring->sdesc = NULL;
-+ txdesc_ring->sdma = (dma_addr_t)0;
-+}
-+
-+static int edma_cfg_tx_desc_ring_setup(struct edma_txdesc_ring *txdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+
-+ /* Allocate RxFill ring descriptors. */
-+ txdesc_ring->pdesc = dma_alloc_coherent(dev, sizeof(struct edma_txdesc_pri)
-+ * txdesc_ring->count,
-+ &txdesc_ring->pdma,
-+ GFP_KERNEL | __GFP_ZERO);
-+
-+ if (unlikely(!txdesc_ring->pdesc))
-+ return -ENOMEM;
-+
-+ txdesc_ring->sdesc = dma_alloc_coherent(dev, sizeof(struct edma_txdesc_sec)
-+ * txdesc_ring->count,
-+ &txdesc_ring->sdma,
-+ GFP_KERNEL | __GFP_ZERO);
-+
-+ if (unlikely(!txdesc_ring->sdesc)) {
-+ dma_free_coherent(dev, (sizeof(struct edma_txdesc_pri)
-+ * txdesc_ring->count),
-+ txdesc_ring->pdesc,
-+ txdesc_ring->pdma);
-+ txdesc_ring->pdesc = NULL;
-+ txdesc_ring->pdma = (dma_addr_t)0;
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static void edma_cfg_tx_desc_ring_configure(struct edma_txdesc_ring *txdesc_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 data, reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA(txdesc_ring->id);
-+ regmap_write(regmap, reg, (u32)(txdesc_ring->pdma & EDMA_RING_DMA_MASK));
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA2(txdesc_ring->id);
-+ regmap_write(regmap, reg, (u32)(txdesc_ring->sdma & EDMA_RING_DMA_MASK));
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_RING_SIZE(txdesc_ring->id);
-+ regmap_write(regmap, reg, (u32)(txdesc_ring->count & EDMA_TXDESC_RING_SIZE_MASK));
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
-+ regmap_write(regmap, reg, (u32)EDMA_TX_INITIAL_PROD_IDX);
-+
-+ data = FIELD_PREP(EDMA_TXDESC_CTRL_FC_GRP_ID_MASK, txdesc_ring->fc_grp_id);
-+
-+ /* Configure group ID for flow control for this Tx ring. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
-+ regmap_write(regmap, reg, data);
-+}
-+
-+static void edma_cfg_txcmpl_ring_configure(struct edma_txcmpl_ring *txcmpl_ring)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 data, reg;
-+
-+ /* Configure TxCmpl ring base address. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_BA(txcmpl_ring->id);
-+ regmap_write(regmap, reg, (u32)(txcmpl_ring->dma & EDMA_RING_DMA_MASK));
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_RING_SIZE(txcmpl_ring->id);
-+ regmap_write(regmap, reg, (u32)(txcmpl_ring->count & EDMA_TXDESC_RING_SIZE_MASK));
-+
-+ /* Set TxCmpl ret mode to opaque. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id);
-+ regmap_write(regmap, reg, EDMA_TXCMPL_RETMODE_OPAQUE);
-+
-+ /* Configure the Mitigation timer. */
-+ data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_TX_MITIGATION_TIMER_DEF,
-+ ppe_dev->clk_rate / MHZ);
-+ data = ((data & EDMA_TX_MOD_TIMER_INIT_MASK)
-+ << EDMA_TX_MOD_TIMER_INIT_SHIFT);
-+ pr_debug("EDMA Tx mitigation timer value: %d\n", data);
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id);
-+ regmap_write(regmap, reg, data);
-+
-+ /* Configure the Mitigation packet count. */
-+ data = (EDMA_TX_MITIGATION_PKT_CNT_DEF & EDMA_TXCMPL_LOW_THRE_MASK)
-+ << EDMA_TXCMPL_LOW_THRE_SHIFT;
-+ pr_debug("EDMA Tx mitigation packet count value: %d\n", data);
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_UGT_THRE(txcmpl_ring->id);
-+ regmap_write(regmap, reg, data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_CTRL(txcmpl_ring->id);
-+ regmap_write(regmap, reg, EDMA_TX_NE_INT_EN);
-+}
-+
-+/**
-+ * edma_cfg_tx_fill_per_port_tx_map - Fill Tx ring mapping.
-+ * @netdev: Netdevice.
-+ * @port_id: Port ID.
-+ *
-+ * Fill per-port Tx ring mapping in net device private area.
-+ */
-+void edma_cfg_tx_fill_per_port_tx_map(struct net_device *netdev, u32 port_id)
-+{
-+ u32 i;
-+
-+ /* Ring to core mapping is done in order starting from 0 for port 1. */
-+ for_each_possible_cpu(i) {
-+ struct edma_port_priv *port_dev = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct edma_txdesc_ring *txdesc_ring;
-+ u32 txdesc_ring_id;
-+
-+ txdesc_ring_id = ((port_id - 1) * num_possible_cpus()) + i;
-+ txdesc_ring = &edma_ctx->tx_rings[txdesc_ring_id];
-+ port_dev->txr_map[i] = txdesc_ring;
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_rings_enable - Enable Tx rings.
-+ *
-+ * Enable Tx rings.
-+ */
-+void edma_cfg_tx_rings_enable(u32 port_id)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txdesc_ring *txdesc_ring;
-+ u32 i, ring_idx, reg;
-+
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txdesc_ring = &edma_ctx->tx_rings[ring_idx];
-+ u32 data;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ data |= FIELD_PREP(EDMA_TXDESC_CTRL_TXEN_MASK, EDMA_TXDESC_TX_ENABLE);
-+
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_rings_disable - Disable Tx rings.
-+ *
-+ * Disable Tx rings.
-+ */
-+void edma_cfg_tx_rings_disable(u32 port_id)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txdesc_ring *txdesc_ring;
-+ u32 i, ring_idx, reg;
-+
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txdesc_ring = &edma_ctx->tx_rings[ring_idx];
-+ u32 data;
-+
-+ txdesc_ring = &edma_ctx->tx_rings[i];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ data &= ~EDMA_TXDESC_TX_ENABLE;
-+ regmap_write(regmap, reg, data);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_ring_mappings - Map Tx to Tx complete rings.
-+ *
-+ * Map Tx to Tx complete rings.
-+ */
-+void edma_cfg_tx_ring_mappings(void)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_ring_info *tx = hw_info->tx;
-+ u32 desc_index, i, data, reg;
-+
-+ /* Clear the TXDESC2CMPL_MAP_xx reg before setting up
-+ * the mapping. This register holds TXDESC to TXFILL ring
-+ * mapping.
-+ */
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR, 0);
-+ regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR, 0);
-+ desc_index = txcmpl->ring_start;
-+
-+ /* 6 registers to hold the completion mapping for total 32
-+ * TX desc rings (0-5, 6-11, 12-17, 18-23, 24-29 and rest).
-+ * In each entry 5 bits hold the mapping for a particular TX desc ring.
-+ */
-+ for (i = tx->ring_start; i < tx->ring_start + tx->num_rings; i++) {
-+ u32 reg, data;
-+
-+ if (i >= 0 && i <= 5)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR;
-+ else if (i >= 6 && i <= 11)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR;
-+ else if (i >= 12 && i <= 17)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR;
-+ else if (i >= 18 && i <= 23)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR;
-+ else if (i >= 24 && i <= 29)
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR;
-+ else
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR;
-+
-+ pr_debug("Configure Tx desc:%u to use TxCmpl:%u\n", i, desc_index);
-+
-+ /* Set the Tx complete descriptor ring number in the mapping register.
-+ * E.g. If (txcmpl ring)desc_index = 31, (txdesc ring)i = 28.
-+ * reg = EDMA_REG_TXDESC2CMPL_MAP_4_ADDR
-+ * data |= (desc_index & 0x1F) << ((i % 6) * 5);
-+ * data |= (0x1F << 20); -
-+ * This sets 11111 at 20th bit of register EDMA_REG_TXDESC2CMPL_MAP_4_ADDR.
-+ */
-+ regmap_read(regmap, reg, &data);
-+ data |= (desc_index & EDMA_TXDESC2CMPL_MAP_TXDESC_MASK) << ((i % 6) * 5);
-+ regmap_write(regmap, reg, data);
-+
-+ desc_index++;
-+ if (desc_index == txcmpl->ring_start + txcmpl->num_rings)
-+ desc_index = txcmpl->ring_start;
-+ }
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_0_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_1_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_2_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_3_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_4_ADDR: 0x%x\n", data);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ pr_debug("EDMA_REG_TXDESC2CMPL_MAP_5_ADDR: 0x%x\n", data);
-+}
-+
-+static int edma_cfg_tx_rings_setup(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct edma_ring_info *tx = hw_info->tx;
-+ u32 i, j = 0;
-+
-+ /* Set Txdesc flow control group id, same as port number. */
-+ for (i = 0; i < hw_info->max_ports; i++) {
-+ for_each_possible_cpu(j) {
-+ struct edma_txdesc_ring *txdesc_ring = NULL;
-+ u32 txdesc_idx = (i * num_possible_cpus()) + j;
-+
-+ txdesc_ring = &edma_ctx->tx_rings[txdesc_idx];
-+ txdesc_ring->fc_grp_id = i + 1;
-+ }
-+ }
-+
-+ /* Allocate TxDesc ring descriptors. */
-+ for (i = 0; i < tx->num_rings; i++) {
-+ struct edma_txdesc_ring *txdesc_ring = NULL;
-+ int ret;
-+
-+ txdesc_ring = &edma_ctx->tx_rings[i];
-+ txdesc_ring->count = EDMA_TX_RING_SIZE;
-+ txdesc_ring->id = tx->ring_start + i;
-+
-+ ret = edma_cfg_tx_desc_ring_setup(txdesc_ring);
-+ if (ret) {
-+ pr_err("Error in setting up %d txdesc ring. ret: %d",
-+ txdesc_ring->id, ret);
-+ while (i-- >= 0)
-+ edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
-+
-+ return -ENOMEM;
-+ }
-+ }
-+
-+ /* Allocate TxCmpl ring descriptors. */
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ struct edma_txcmpl_ring *txcmpl_ring = NULL;
-+ int ret;
-+
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[i];
-+ txcmpl_ring->count = EDMA_TX_RING_SIZE;
-+ txcmpl_ring->id = txcmpl->ring_start + i;
-+
-+ ret = edma_cfg_txcmpl_ring_setup(txcmpl_ring);
-+ if (ret != 0) {
-+ pr_err("Error in setting up %d TxCmpl ring. ret: %d",
-+ txcmpl_ring->id, ret);
-+ while (i-- >= 0)
-+ edma_cfg_txcmpl_ring_cleanup(&edma_ctx->txcmpl_rings[i]);
-+
-+ goto txcmpl_mem_alloc_fail;
-+ }
-+ }
-+
-+ pr_debug("Tx descriptor count for Tx desc and Tx complete rings: %d\n",
-+ EDMA_TX_RING_SIZE);
-+
-+ return 0;
-+
-+txcmpl_mem_alloc_fail:
-+ for (i = 0; i < tx->num_rings; i++)
-+ edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
-+
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * edma_cfg_tx_rings_alloc - Allocate EDMA Tx rings.
-+ *
-+ * Allocate EDMA Tx rings.
-+ */
-+int edma_cfg_tx_rings_alloc(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct edma_ring_info *tx = hw_info->tx;
-+
-+ edma_ctx->tx_rings = kzalloc((sizeof(*edma_ctx->tx_rings) * tx->num_rings),
-+ GFP_KERNEL);
-+ if (!edma_ctx->tx_rings)
-+ return -ENOMEM;
-+
-+ edma_ctx->txcmpl_rings = kzalloc((sizeof(*edma_ctx->txcmpl_rings) * txcmpl->num_rings),
-+ GFP_KERNEL);
-+ if (!edma_ctx->txcmpl_rings)
-+ goto txcmpl_ring_alloc_fail;
-+
-+ pr_debug("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
-+ tx->num_rings, tx->ring_start,
-+ (tx->ring_start + tx->num_rings - 1),
-+ txcmpl->num_rings, txcmpl->ring_start,
-+ (txcmpl->ring_start + txcmpl->num_rings - 1));
-+
-+ if (edma_cfg_tx_rings_setup()) {
-+ pr_err("Error in setting up tx rings\n");
-+ goto tx_rings_setup_fail;
-+ }
-+
-+ return 0;
-+
-+tx_rings_setup_fail:
-+ kfree(edma_ctx->txcmpl_rings);
-+ edma_ctx->txcmpl_rings = NULL;
-+
-+txcmpl_ring_alloc_fail:
-+ kfree(edma_ctx->tx_rings);
-+ edma_ctx->tx_rings = NULL;
-+
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * edma_cfg_tx_rings_cleanup - Cleanup EDMA Tx rings.
-+ *
-+ * Cleanup EDMA Tx rings.
-+ */
-+void edma_cfg_tx_rings_cleanup(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct edma_ring_info *tx = hw_info->tx;
-+ u32 i;
-+
-+ /* Free any buffers assigned to any descriptors. */
-+ for (i = 0; i < tx->num_rings; i++)
-+ edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
-+
-+ /* Free Tx completion descriptors. */
-+ for (i = 0; i < txcmpl->num_rings; i++)
-+ edma_cfg_txcmpl_ring_cleanup(&edma_ctx->txcmpl_rings[i]);
-+
-+ kfree(edma_ctx->tx_rings);
-+ kfree(edma_ctx->txcmpl_rings);
-+ edma_ctx->tx_rings = NULL;
-+ edma_ctx->txcmpl_rings = NULL;
-+}
-+
-+/**
-+ * edma_cfg_tx_rings - Configure EDMA Tx rings.
-+ *
-+ * Configure EDMA Tx rings.
-+ */
-+void edma_cfg_tx_rings(void)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct edma_ring_info *tx = hw_info->tx;
-+ u32 i;
-+
-+ /* Configure Tx desc ring. */
-+ for (i = 0; i < tx->num_rings; i++)
-+ edma_cfg_tx_desc_ring_configure(&edma_ctx->tx_rings[i]);
-+
-+ /* Configure TxCmpl ring. */
-+ for (i = 0; i < txcmpl->num_rings; i++)
-+ edma_cfg_txcmpl_ring_configure(&edma_ctx->txcmpl_rings[i]);
-+}
-+
-+/**
-+ * edma_cfg_tx_disable_interrupts - EDMA disable TX interrupts.
-+ *
-+ * Disable TX interrupt masks.
-+ */
-+void edma_cfg_tx_disable_interrupts(u32 port_id)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx, reg;
-+
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
-+ regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_enable_interrupts - EDMA enable TX interrupts.
-+ *
-+ * Enable TX interrupt masks.
-+ */
-+void edma_cfg_tx_enable_interrupts(u32 port_id)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx, reg;
-+
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
-+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_txcmpl);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_napi_enable - EDMA Tx NAPI.
-+ * @port_id: Port ID.
-+ *
-+ * Enable Tx NAPI.
-+ */
-+void edma_cfg_tx_napi_enable(u32 port_id)
-+{
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx;
-+
-+ /* Enabling Tx napi for a interface with each queue. */
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ if (!txcmpl_ring->napi_added)
-+ continue;
-+
-+ napi_enable(&txcmpl_ring->napi);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_napi_disable - Disable Tx NAPI.
-+ * @port_id: Port ID.
-+ *
-+ * Disable Tx NAPI.
-+ */
-+void edma_cfg_tx_napi_disable(u32 port_id)
-+{
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx;
-+
-+ /* Disabling Tx napi for a interface with each queue. */
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ if (!txcmpl_ring->napi_added)
-+ continue;
-+
-+ napi_disable(&txcmpl_ring->napi);
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_napi_delete - Delete Tx NAPI.
-+ * @port_id: Port ID.
-+ *
-+ * Delete Tx NAPI.
-+ */
-+void edma_cfg_tx_napi_delete(u32 port_id)
-+{
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx;
-+
-+ /* Disabling Tx napi for a interface with each queue. */
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ if (!txcmpl_ring->napi_added)
-+ continue;
-+
-+ netif_napi_del(&txcmpl_ring->napi);
-+ txcmpl_ring->napi_added = false;
-+ }
-+}
-+
-+/**
-+ * edma_cfg_tx_napi_add - TX NAPI add.
-+ * @netdev: Netdevice.
-+ * @port_id: Port ID.
-+ *
-+ * TX NAPI add.
-+ */
-+void edma_cfg_tx_napi_add(struct net_device *netdev, u32 port_id)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ u32 i, ring_idx;
-+
-+ /* Adding tx napi for a interface with each queue. */
-+ for_each_possible_cpu(i) {
-+ ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
-+ netif_napi_add_weight(netdev, &txcmpl_ring->napi,
-+ edma_tx_napi_poll, hw_info->napi_budget_tx);
-+ txcmpl_ring->napi_added = true;
-+ netdev_dbg(netdev, "Napi added for txcmpl ring: %u\n", txcmpl_ring->id);
-+ }
-+
-+ netdev_dbg(netdev, "Tx NAPI budget: %d\n", hw_info->napi_budget_tx);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
-@@ -0,0 +1,28 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_CFG_TX__
-+#define __EDMA_CFG_TX__
-+
-+/* Tx mitigation timer's default value. */
-+#define EDMA_TX_MITIGATION_TIMER_DEF 250
-+
-+/* Tx mitigation packet count default value. */
-+#define EDMA_TX_MITIGATION_PKT_CNT_DEF 16
-+
-+void edma_cfg_tx_rings(void);
-+int edma_cfg_tx_rings_alloc(void);
-+void edma_cfg_tx_rings_cleanup(void);
-+void edma_cfg_tx_disable_interrupts(u32 port_id);
-+void edma_cfg_tx_enable_interrupts(u32 port_id);
-+void edma_cfg_tx_napi_enable(u32 port_id);
-+void edma_cfg_tx_napi_disable(u32 port_id);
-+void edma_cfg_tx_napi_delete(u32 port_id);
-+void edma_cfg_tx_napi_add(struct net_device *netdevice, u32 macid);
-+void edma_cfg_tx_ring_mappings(void);
-+void edma_cfg_txcmpl_mapping_fill(void);
-+void edma_cfg_tx_rings_enable(u32 port_id);
-+void edma_cfg_tx_rings_disable(u32 port_id);
-+void edma_cfg_tx_fill_per_port_tx_map(struct net_device *netdev, u32 macid);
-+#endif
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-@@ -13,6 +13,7 @@
-
- #include "edma.h"
- #include "edma_cfg_rx.h"
-+#include "edma_cfg_tx.h"
- #include "edma_port.h"
- #include "ppe_regs.h"
-
-@@ -35,6 +36,15 @@ static int edma_port_stats_alloc(struct
- return -ENOMEM;
- }
-
-+ port_priv->pcpu_stats.tx_stats =
-+ netdev_alloc_pcpu_stats(struct edma_port_tx_stats);
-+ if (!port_priv->pcpu_stats.tx_stats) {
-+ netdev_err(netdev, "Per-cpu EDMA Tx stats alloc failed for %s\n",
-+ netdev->name);
-+ free_percpu(port_priv->pcpu_stats.rx_stats);
-+ return -ENOMEM;
-+ }
-+
- return 0;
- }
-
-@@ -43,6 +53,28 @@ static void edma_port_stats_free(struct
- struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-
- free_percpu(port_priv->pcpu_stats.rx_stats);
-+ free_percpu(port_priv->pcpu_stats.tx_stats);
-+}
-+
-+static void edma_port_configure(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+ int port_id = port->port_id;
-+
-+ edma_cfg_tx_fill_per_port_tx_map(netdev, port_id);
-+ edma_cfg_tx_rings_enable(port_id);
-+ edma_cfg_tx_napi_add(netdev, port_id);
-+}
-+
-+static void edma_port_deconfigure(struct net_device *netdev)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+ int port_id = port->port_id;
-+
-+ edma_cfg_tx_napi_delete(port_id);
-+ edma_cfg_tx_rings_disable(port_id);
- }
-
- static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
-@@ -60,6 +92,7 @@ static int edma_port_open(struct net_dev
- {
- struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
- struct ppe_port *ppe_port;
-+ int port_id;
-
- if (!port_priv)
- return -EINVAL;
-@@ -74,10 +107,14 @@ static int edma_port_open(struct net_dev
- netdev->wanted_features |= EDMA_NETDEV_FEATURES;
-
- ppe_port = port_priv->ppe_port;
-+ port_id = ppe_port->port_id;
-
- if (ppe_port->phylink)
- phylink_start(ppe_port->phylink);
-
-+ edma_cfg_tx_napi_enable(port_id);
-+ edma_cfg_tx_enable_interrupts(port_id);
-+
- netif_start_queue(netdev);
-
- return 0;
-@@ -87,13 +124,21 @@ static int edma_port_close(struct net_de
- {
- struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
- struct ppe_port *ppe_port;
-+ int port_id;
-
- if (!port_priv)
- return -EINVAL;
-
- netif_stop_queue(netdev);
-
-+ /* 20ms delay would provide a plenty of margin to take care of in-flight packets. */
-+ msleep(20);
-+
- ppe_port = port_priv->ppe_port;
-+ port_id = ppe_port->port_id;
-+
-+ edma_cfg_tx_disable_interrupts(port_id);
-+ edma_cfg_tx_napi_disable(port_id);
-
- /* Phylink close. */
- if (ppe_port->phylink)
-@@ -137,6 +182,92 @@ static netdev_features_t edma_port_featu
- return features;
- }
-
-+static netdev_tx_t edma_port_xmit(struct sk_buff *skb,
-+ struct net_device *dev)
-+{
-+ struct edma_port_priv *port_priv = NULL;
-+ struct edma_port_pcpu_stats *pcpu_stats;
-+ struct edma_txdesc_ring *txdesc_ring;
-+ struct edma_port_tx_stats *stats;
-+ enum edma_tx_gso_status result;
-+ struct sk_buff *segs = NULL;
-+ u8 cpu_id;
-+ u32 skbq;
-+ int ret;
-+
-+ if (!skb || !dev)
-+ return NETDEV_TX_OK;
-+
-+ port_priv = netdev_priv(dev);
-+
-+ /* Select a TX ring. */
-+ skbq = (skb_get_queue_mapping(skb) & (num_possible_cpus() - 1));
-+
-+ txdesc_ring = (struct edma_txdesc_ring *)port_priv->txr_map[skbq];
-+
-+ pcpu_stats = &port_priv->pcpu_stats;
-+ stats = this_cpu_ptr(pcpu_stats->tx_stats);
-+
-+ /* HW does not support TSO for packets with more than or equal to
-+ * 32 segments. Perform SW GSO for such packets.
-+ */
-+ result = edma_tx_gso_segment(skb, dev, &segs);
-+ if (likely(result == EDMA_TX_GSO_NOT_NEEDED)) {
-+ /* Transmit the packet. */
-+ ret = edma_tx_ring_xmit(dev, skb, txdesc_ring, stats);
-+
-+ if (unlikely(ret == EDMA_TX_FAIL_NO_DESC)) {
-+ if (likely(!edma_ctx->tx_requeue_stop)) {
-+ cpu_id = smp_processor_id();
-+ netdev_dbg(dev, "Stopping tx queue due to lack oftx descriptors\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->tx_queue_stopped[cpu_id];
-+ u64_stats_update_end(&stats->syncp);
-+ netif_tx_stop_queue(netdev_get_tx_queue(dev, skbq));
-+ return NETDEV_TX_BUSY;
-+ }
-+ }
-+
-+ if (unlikely(ret != EDMA_TX_OK)) {
-+ dev_kfree_skb_any(skb);
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->tx_drops;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ return NETDEV_TX_OK;
-+ } else if (unlikely(result == EDMA_TX_GSO_FAIL)) {
-+ netdev_dbg(dev, "%p: SW GSO failed for segment size: %d\n",
-+ skb, skb_shinfo(skb)->gso_segs);
-+ dev_kfree_skb_any(skb);
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->tx_gso_drop_pkts;
-+ u64_stats_update_end(&stats->syncp);
-+ return NETDEV_TX_OK;
-+ }
-+
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->tx_gso_pkts;
-+ u64_stats_update_end(&stats->syncp);
-+
-+ dev_kfree_skb_any(skb);
-+ while (segs) {
-+ skb = segs;
-+ segs = segs->next;
-+
-+ /* Transmit the packet. */
-+ ret = edma_tx_ring_xmit(dev, skb, txdesc_ring, stats);
-+ if (unlikely(ret != EDMA_TX_OK)) {
-+ dev_kfree_skb_any(skb);
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->tx_drops;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+ }
-+
-+ return NETDEV_TX_OK;
-+}
-+
- static void edma_port_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
- {
-@@ -179,6 +310,7 @@ static int edma_port_set_mac_address(str
- static const struct net_device_ops edma_port_netdev_ops = {
- .ndo_open = edma_port_open,
- .ndo_stop = edma_port_close,
-+ .ndo_start_xmit = edma_port_xmit,
- .ndo_get_stats64 = edma_port_get_stats64,
- .ndo_set_mac_address = edma_port_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
-@@ -199,6 +331,7 @@ void edma_port_destroy(struct ppe_port *
- int port_id = port->port_id;
- struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
-
-+ edma_port_deconfigure(netdev);
- edma_port_stats_free(netdev);
- unregister_netdev(netdev);
- free_netdev(netdev);
-@@ -276,6 +409,8 @@ int edma_port_setup(struct ppe_port *por
- */
- edma_ctx->netdev_arr[port_id - 1] = netdev;
-
-+ edma_port_configure(netdev);
-+
- /* Setup phylink. */
- ret = ppe_port_phylink_setup(port, netdev);
- if (ret) {
-@@ -298,6 +433,7 @@ int edma_port_setup(struct ppe_port *por
- register_netdev_fail:
- ppe_port_phylink_destroy(port);
- port_phylink_setup_fail:
-+ edma_port_deconfigure(netdev);
- edma_ctx->netdev_arr[port_id - 1] = NULL;
- edma_port_stats_free(netdev);
- stats_alloc_fail:
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
-@@ -7,6 +7,8 @@
-
- #include "ppe_port.h"
-
-+#define EDMA_PORT_MAX_CORE 4
-+
- #define EDMA_NETDEV_FEATURES (NETIF_F_FRAGLIST \
- | NETIF_F_SG \
- | NETIF_F_RXCSUM \
-@@ -35,11 +37,43 @@ struct edma_port_rx_stats {
- };
-
- /**
-+ * struct edma_port_tx_stats - EDMA TX port per CPU stats for the port.
-+ * @tx_pkts: Number of Tx packets
-+ * @tx_bytes: Number of Tx bytes
-+ * @tx_drops: Number of Tx drops
-+ * @tx_nr_frag_pkts: Number of Tx nr_frag packets
-+ * @tx_fraglist_pkts: Number of Tx fraglist packets
-+ * @tx_fraglist_with_nr_frags_pkts: Number of Tx packets with fraglist and nr_frags
-+ * @tx_tso_pkts: Number of Tx TSO packets
-+ * @tx_tso_drop_pkts: Number of Tx TSO drop packets
-+ * @tx_gso_pkts: Number of Tx GSO packets
-+ * @tx_gso_drop_pkts: Number of Tx GSO drop packets
-+ * @tx_queue_stopped: Number of Tx queue stopped packets
-+ * @syncp: Synchronization pointer
-+ */
-+struct edma_port_tx_stats {
-+ u64 tx_pkts;
-+ u64 tx_bytes;
-+ u64 tx_drops;
-+ u64 tx_nr_frag_pkts;
-+ u64 tx_fraglist_pkts;
-+ u64 tx_fraglist_with_nr_frags_pkts;
-+ u64 tx_tso_pkts;
-+ u64 tx_tso_drop_pkts;
-+ u64 tx_gso_pkts;
-+ u64 tx_gso_drop_pkts;
-+ u64 tx_queue_stopped[EDMA_PORT_MAX_CORE];
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
- * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
- * @rx_stats: Per CPU Rx statistics
-+ * @tx_stats: Per CPU Tx statistics
- */
- struct edma_port_pcpu_stats {
- struct edma_port_rx_stats __percpu *rx_stats;
-+ struct edma_port_tx_stats __percpu *tx_stats;
- };
-
- /**
-@@ -54,6 +88,7 @@ struct edma_port_priv {
- struct ppe_port *ppe_port;
- struct net_device *netdev;
- struct edma_port_pcpu_stats pcpu_stats;
-+ struct edma_txdesc_ring *txr_map[EDMA_PORT_MAX_CORE];
- unsigned long flags;
- };
-
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.c
-@@ -0,0 +1,808 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* Provide APIs to alloc Tx Buffers, fill the Tx descriptors and transmit
-+ * Scatter Gather and linear packets, Tx complete to free the skb after transmit.
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/platform_device.h>
-+#include <linux/printk.h>
-+#include <net/gso.h>
-+#include <linux/regmap.h>
-+
-+#include "edma.h"
-+#include "edma_cfg_tx.h"
-+#include "edma_port.h"
-+#include "ppe.h"
-+#include "ppe_regs.h"
-+
-+static u32 edma_tx_num_descs_for_sg(struct sk_buff *skb)
-+{
-+ u32 nr_frags_first = 0, num_tx_desc_needed = 0;
-+
-+ /* Check if we have enough Tx descriptors for SG. */
-+ if (unlikely(skb_shinfo(skb)->nr_frags)) {
-+ nr_frags_first = skb_shinfo(skb)->nr_frags;
-+ WARN_ON_ONCE(nr_frags_first > MAX_SKB_FRAGS);
-+ num_tx_desc_needed += nr_frags_first;
-+ }
-+
-+ /* Walk through fraglist skbs making a note of nr_frags
-+ * One Tx desc for fraglist skb. Fraglist skb may have
-+ * further nr_frags.
-+ */
-+ if (unlikely(skb_has_frag_list(skb))) {
-+ struct sk_buff *iter_skb;
-+
-+ skb_walk_frags(skb, iter_skb) {
-+ u32 nr_frags = skb_shinfo(iter_skb)->nr_frags;
-+
-+ WARN_ON_ONCE(nr_frags > MAX_SKB_FRAGS);
-+ num_tx_desc_needed += (1 + nr_frags);
-+ }
-+ }
-+
-+ return (num_tx_desc_needed + 1);
-+}
-+
-+/**
-+ * edma_tx_gso_segment - Tx GSO.
-+ * @skb: Socket Buffer.
-+ * @netdev: Netdevice.
-+ * @segs: SKB segments from GSO.
-+ *
-+ * Format skbs into GSOs.
-+ *
-+ * Return 1 on success, error code on failure.
-+ */
-+enum edma_tx_gso_status edma_tx_gso_segment(struct sk_buff *skb,
-+ struct net_device *netdev, struct sk_buff **segs)
-+{
-+ u32 num_tx_desc_needed;
-+
-+ /* Check is skb is non-linear to proceed. */
-+ if (likely(!skb_is_nonlinear(skb)))
-+ return EDMA_TX_GSO_NOT_NEEDED;
-+
-+ /* Check if TSO is enabled. If so, return as skb doesn't
-+ * need to be segmented by linux.
-+ */
-+ if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-+ num_tx_desc_needed = edma_tx_num_descs_for_sg(skb);
-+ if (likely(num_tx_desc_needed <= EDMA_TX_TSO_SEG_MAX))
-+ return EDMA_TX_GSO_NOT_NEEDED;
-+ }
-+
-+ /* GSO segmentation of the skb into multiple segments. */
-+ *segs = skb_gso_segment(skb, netdev->features
-+ & ~(NETIF_F_TSO | NETIF_F_TSO6));
-+
-+ /* Check for error in GSO segmentation. */
-+ if (IS_ERR_OR_NULL(*segs)) {
-+ netdev_info(netdev, "Tx gso fail\n");
-+ return EDMA_TX_GSO_FAIL;
-+ }
-+
-+ return EDMA_TX_GSO_SUCCEED;
-+}
-+
-+/**
-+ * edma_tx_complete - Reap Tx completion descriptors.
-+ * @work_to_do: Work to do.
-+ * @txcmpl_ring: Tx Completion ring.
-+ *
-+ * Reap Tx completion descriptors of the transmitted
-+ * packets and free the corresponding SKBs.
-+ *
-+ * Return the number descriptors for which Tx complete is done.
-+ */
-+u32 edma_tx_complete(u32 work_to_do, struct edma_txcmpl_ring *txcmpl_ring)
-+{
-+ struct edma_txcmpl_stats *txcmpl_stats = &txcmpl_ring->txcmpl_stats;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 cons_idx, end_idx, data, cpu_id;
-+ struct device *dev = ppe_dev->dev;
-+ u32 avail, count, txcmpl_errors;
-+ struct edma_txcmpl_desc *txcmpl;
-+ u32 prod_idx = 0, more_bit = 0;
-+ struct netdev_queue *nq;
-+ struct sk_buff *skb;
-+ u32 reg;
-+
-+ cons_idx = txcmpl_ring->cons_idx;
-+
-+ if (likely(txcmpl_ring->avail_pkt >= work_to_do)) {
-+ avail = work_to_do;
-+ } else {
-+ /* Get TXCMPL ring producer index. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_PROD_IDX(txcmpl_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ prod_idx = data & EDMA_TXCMPL_PROD_IDX_MASK;
-+
-+ avail = EDMA_DESC_AVAIL_COUNT(prod_idx, cons_idx, EDMA_TX_RING_SIZE);
-+ txcmpl_ring->avail_pkt = avail;
-+
-+ if (unlikely(!avail)) {
-+ dev_dbg(dev, "No available descriptors are pending for %d txcmpl ring\n",
-+ txcmpl_ring->id);
-+ u64_stats_update_begin(&txcmpl_stats->syncp);
-+ ++txcmpl_stats->no_pending_desc;
-+ u64_stats_update_end(&txcmpl_stats->syncp);
-+ return 0;
-+ }
-+
-+ avail = min(avail, work_to_do);
-+ }
-+
-+ count = avail;
-+
-+ end_idx = (cons_idx + avail) & EDMA_TX_RING_SIZE_MASK;
-+ txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
-+
-+ /* Instead of freeing the skb, it might be better to save and use
-+ * for Rxfill.
-+ */
-+ while (likely(avail--)) {
-+ /* The last descriptor holds the SKB pointer for scattered frames.
-+ * So skip the descriptors with more bit set.
-+ */
-+ more_bit = EDMA_TXCMPL_MORE_BIT_GET(txcmpl);
-+ if (unlikely(more_bit)) {
-+ u64_stats_update_begin(&txcmpl_stats->syncp);
-+ ++txcmpl_stats->desc_with_more_bit;
-+ u64_stats_update_end(&txcmpl_stats->syncp);
-+ cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
-+ txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
-+ continue;
-+ }
-+
-+ /* Find and free the skb for Tx completion. */
-+ skb = (struct sk_buff *)EDMA_TXCMPL_OPAQUE_GET(txcmpl);
-+ if (unlikely(!skb)) {
-+ if (net_ratelimit())
-+ dev_warn(dev, "Invalid cons_idx:%u prod_idx:%u word2:%x word3:%x\n",
-+ cons_idx, prod_idx, txcmpl->word2, txcmpl->word3);
-+
-+ u64_stats_update_begin(&txcmpl_stats->syncp);
-+ ++txcmpl_stats->invalid_buffer;
-+ u64_stats_update_end(&txcmpl_stats->syncp);
-+ } else {
-+ dev_dbg(dev, "TXCMPL: skb:%p, skb->len %d, skb->data_len %d, cons_idx:%d prod_idx:%d word2:0x%x word3:0x%x\n",
-+ skb, skb->len, skb->data_len, cons_idx, prod_idx,
-+ txcmpl->word2, txcmpl->word3);
-+
-+ txcmpl_errors = EDMA_TXCOMP_RING_ERROR_GET(txcmpl->word3);
-+ if (unlikely(txcmpl_errors)) {
-+ if (net_ratelimit())
-+ dev_err(dev, "Error 0x%0x observed in tx complete %d ring\n",
-+ txcmpl_errors, txcmpl_ring->id);
-+
-+ u64_stats_update_begin(&txcmpl_stats->syncp);
-+ ++txcmpl_stats->errors;
-+ u64_stats_update_end(&txcmpl_stats->syncp);
-+ }
-+
-+ /* Retrieve pool id for unmapping.
-+ * 0 for linear skb and (pool id - 1) represents nr_frag index.
-+ */
-+ if (!EDMA_TXCOMP_POOL_ID_GET(txcmpl)) {
-+ dma_unmap_single(dev, virt_to_phys(skb->data),
-+ skb->len, DMA_TO_DEVICE);
-+ } else {
-+ u8 frag_index = (EDMA_TXCOMP_POOL_ID_GET(txcmpl) - 1);
-+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_index];
-+
-+ dma_unmap_page(dev, virt_to_phys(frag),
-+ PAGE_SIZE, DMA_TO_DEVICE);
-+ }
-+
-+ dev_kfree_skb(skb);
-+ }
-+
-+ cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
-+ txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
-+ }
-+
-+ txcmpl_ring->cons_idx = cons_idx;
-+ txcmpl_ring->avail_pkt -= count;
-+
-+ dev_dbg(dev, "TXCMPL:%u count:%u prod_idx:%u cons_idx:%u\n",
-+ txcmpl_ring->id, count, prod_idx, cons_idx);
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id);
-+ regmap_write(regmap, reg, cons_idx);
-+
-+ /* If tx_requeue_stop disabled (tx_requeue_stop = 0)
-+ * Fetch the tx queue of interface and check if it is stopped.
-+ * if queue is stopped and interface is up, wake up this queue.
-+ */
-+ if (unlikely(!edma_ctx->tx_requeue_stop)) {
-+ cpu_id = smp_processor_id();
-+ nq = netdev_get_tx_queue(txcmpl_ring->napi.dev, cpu_id);
-+ if (unlikely(netif_tx_queue_stopped(nq)) &&
-+ netif_carrier_ok(txcmpl_ring->napi.dev)) {
-+ dev_dbg(dev, "Waking queue number %d, for interface %s\n",
-+ cpu_id, txcmpl_ring->napi.dev->name);
-+ __netif_tx_lock(nq, cpu_id);
-+ netif_tx_wake_queue(nq);
-+ __netif_tx_unlock(nq);
-+ }
-+ }
-+
-+ return count;
-+}
-+
-+/**
-+ * edma_tx_napi_poll - EDMA TX NAPI handler.
-+ * @napi: NAPI structure.
-+ * @budget: Tx NAPI Budget.
-+ *
-+ * EDMA TX NAPI handler.
-+ */
-+int edma_tx_napi_poll(struct napi_struct *napi, int budget)
-+{
-+ struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)napi;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 txcmpl_intr_status;
-+ int work_done = 0;
-+ u32 data, reg;
-+
-+ do {
-+ work_done += edma_tx_complete(budget - work_done, txcmpl_ring);
-+ if (work_done >= budget)
-+ return work_done;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_STAT(txcmpl_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ txcmpl_intr_status = data & EDMA_TXCMPL_RING_INT_STATUS_MASK;
-+ } while (txcmpl_intr_status);
-+
-+ /* No more packets to process. Finish NAPI processing. */
-+ napi_complete(napi);
-+
-+ /* Set TXCMPL ring interrupt mask. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
-+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_txcmpl);
-+
-+ return work_done;
-+}
-+
-+/**
-+ * edma_tx_handle_irq - Tx IRQ Handler.
-+ * @irq: Interrupt request.
-+ * @ctx: Context.
-+ *
-+ * Process TX IRQ and schedule NAPI.
-+ *
-+ * Return IRQ handler code.
-+ */
-+irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
-+{
-+ struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)ctx;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 reg;
-+
-+ pr_debug("irq: irq=%d txcmpl_ring_id=%u\n", irq, txcmpl_ring->id);
-+ if (likely(napi_schedule_prep(&txcmpl_ring->napi))) {
-+ /* Disable TxCmpl intr. */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
-+ regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
-+ __napi_schedule(&txcmpl_ring->napi);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void edma_tx_dma_unmap_frags(struct sk_buff *skb, u32 nr_frags)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+ u32 buf_len = 0;
-+ u8 i = 0;
-+
-+ for (i = 0; i < skb_shinfo(skb)->nr_frags - nr_frags; i++) {
-+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-+
-+ /* DMA mapping was not done for zero size segments. */
-+ buf_len = skb_frag_size(frag);
-+ if (unlikely(buf_len == 0))
-+ continue;
-+
-+ dma_unmap_page(dev, virt_to_phys(frag), PAGE_SIZE,
-+ DMA_TO_DEVICE);
-+ }
-+}
-+
-+static u32 edma_tx_skb_nr_frags(struct edma_txdesc_ring *txdesc_ring,
-+ struct edma_txdesc_pri **txdesc, struct sk_buff *skb,
-+ u32 *hw_next_to_use, u32 *invalid_frag)
-+{
-+ u32 nr_frags = 0, buf_len = 0, num_descs = 0, start_idx = 0, end_idx = 0;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ u32 start_hw_next_to_use = *hw_next_to_use;
-+ struct edma_txdesc_pri *txd = *txdesc;
-+ struct device *dev = ppe_dev->dev;
-+ u8 i = 0;
-+
-+ /* Hold onto the index mapped to *txdesc.
-+ * This will be the index previous to that of current *hw_next_to_use.
-+ */
-+ start_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK)
-+ & EDMA_TX_RING_SIZE_MASK);
-+
-+ /* Handle if the skb has nr_frags. */
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ num_descs = nr_frags;
-+ i = 0;
-+
-+ while (nr_frags--) {
-+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-+ dma_addr_t buff_addr;
-+
-+ buf_len = skb_frag_size(frag);
-+
-+ /* Zero size segment can lead EDMA HW to hang so, we don't want to
-+ * process them. Zero size segment can happen during TSO operation
-+ * if there is nothing but header in the primary segment.
-+ */
-+ if (unlikely(buf_len == 0)) {
-+ num_descs--;
-+ i++;
-+ continue;
-+ }
-+
-+ /* Setting the MORE bit on the previous Tx descriptor.
-+ * Note: We will flush this descriptor as well later.
-+ */
-+ EDMA_TXDESC_MORE_BIT_SET(txd, 1);
-+ EDMA_TXDESC_ENDIAN_SET(txd);
-+
-+ txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
-+ memset(txd, 0, sizeof(struct edma_txdesc_pri));
-+ buff_addr = skb_frag_dma_map(dev, frag, 0, buf_len,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, buff_addr)) {
-+ dev_dbg(dev, "Unable to dma first descriptor for nr_frags tx\n");
-+ *hw_next_to_use = start_hw_next_to_use;
-+ *invalid_frag = nr_frags;
-+ return 0;
-+ }
-+
-+ EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
-+ EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
-+ EDMA_TXDESC_POOL_ID_SET(txd, (i + 1));
-+
-+ *hw_next_to_use = ((*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK);
-+ i++;
-+ }
-+
-+ EDMA_TXDESC_ENDIAN_SET(txd);
-+
-+ /* This will be the index previous to that of current *hw_next_to_use. */
-+ end_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK) & EDMA_TX_RING_SIZE_MASK);
-+
-+ *txdesc = txd;
-+
-+ return num_descs;
-+}
-+
-+static void edma_tx_fill_pp_desc(struct edma_port_priv *port_priv,
-+ struct edma_txdesc_pri *txd, struct sk_buff *skb,
-+ struct edma_port_tx_stats *stats)
-+{
-+ struct ppe_port *port = port_priv->ppe_port;
-+ int port_id = port->port_id;
-+
-+ /* Offload L3/L4 checksum computation. */
-+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-+ EDMA_TXDESC_ADV_OFFLOAD_SET(txd);
-+ EDMA_TXDESC_IP_CSUM_SET(txd);
-+ EDMA_TXDESC_L4_CSUM_SET(txd);
-+ }
-+
-+ /* Check if the packet needs TSO
-+ * This will be mostly true for SG packets.
-+ */
-+ if (unlikely(skb_is_gso(skb))) {
-+ if ((skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) ||
-+ (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
-+ u32 mss = skb_shinfo(skb)->gso_size;
-+
-+ /* If MSS<256, HW will do TSO using MSS=256,
-+ * if MSS>10K, HW will do TSO using MSS=10K,
-+ * else HW will report error 0x200000 in Tx Cmpl.
-+ */
-+ if (mss < EDMA_TX_TSO_MSS_MIN)
-+ mss = EDMA_TX_TSO_MSS_MIN;
-+ else if (mss > EDMA_TX_TSO_MSS_MAX)
-+ mss = EDMA_TX_TSO_MSS_MAX;
-+
-+ EDMA_TXDESC_TSO_ENABLE_SET(txd, 1);
-+ EDMA_TXDESC_MSS_SET(txd, mss);
-+
-+ /* Update tso stats. */
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_tso_pkts++;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+ }
-+
-+ /* Set destination information in the descriptor. */
-+ EDMA_TXDESC_SERVICE_CODE_SET(txd, PPE_EDMA_SC_BYPASS_ID);
-+ EDMA_DST_INFO_SET(txd, port_id);
-+}
-+
-+static struct edma_txdesc_pri *edma_tx_skb_first_desc(struct edma_port_priv *port_priv,
-+ struct edma_txdesc_ring *txdesc_ring,
-+ struct sk_buff *skb, u32 *hw_next_to_use,
-+ struct edma_port_tx_stats *stats)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct edma_txdesc_pri *txd = NULL;
-+ struct device *dev = ppe_dev->dev;
-+ dma_addr_t buff_addr;
-+ u32 buf_len = 0;
-+
-+ /* Get the packet length. */
-+ buf_len = skb_headlen(skb);
-+ txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
-+ memset(txd, 0, sizeof(struct edma_txdesc_pri));
-+
-+ /* Set the data pointer as the buffer address in the descriptor. */
-+ buff_addr = dma_map_single(dev, skb->data, buf_len, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, buff_addr)) {
-+ dev_dbg(dev, "Unable to dma first descriptor for tx\n");
-+ return NULL;
-+ }
-+
-+ EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
-+ EDMA_TXDESC_POOL_ID_SET(txd, 0);
-+ edma_tx_fill_pp_desc(port_priv, txd, skb, stats);
-+
-+ /* Set packet length in the descriptor. */
-+ EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
-+ *hw_next_to_use = (*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK;
-+
-+ return txd;
-+}
-+
-+static void edma_tx_handle_dma_err(struct sk_buff *skb, u32 num_sg_frag_list)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct device *dev = ppe_dev->dev;
-+ struct sk_buff *iter_skb = NULL;
-+ u32 cnt_sg_frag_list = 0;
-+
-+ /* Walk through all fraglist skbs. */
-+ skb_walk_frags(skb, iter_skb) {
-+ if (skb_headlen(iter_skb)) {
-+ dma_unmap_single(dev, virt_to_phys(iter_skb->data),
-+ skb_headlen(iter_skb), DMA_TO_DEVICE);
-+ cnt_sg_frag_list += 1;
-+ }
-+
-+ if (cnt_sg_frag_list == num_sg_frag_list)
-+ return;
-+
-+ /* skb fraglist skb had nr_frags, unmap that memory. */
-+ u32 nr_frags = skb_shinfo(iter_skb)->nr_frags;
-+
-+ if (nr_frags == 0)
-+ continue;
-+
-+ for (int i = 0; i < nr_frags; i++) {
-+ skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i];
-+
-+ /* DMA mapping was not done for zero size segments. */
-+ if (unlikely(skb_frag_size(frag) == 0))
-+ continue;
-+
-+ dma_unmap_page(dev, virt_to_phys(frag),
-+ PAGE_SIZE, DMA_TO_DEVICE);
-+ cnt_sg_frag_list += 1;
-+ if (cnt_sg_frag_list == num_sg_frag_list)
-+ return;
-+ }
-+ }
-+}
-+
-+static u32 edma_tx_skb_sg_fill_desc(struct edma_txdesc_ring *txdesc_ring,
-+ struct edma_txdesc_pri **txdesc,
-+ struct sk_buff *skb, u32 *hw_next_to_use,
-+ struct edma_port_tx_stats *stats)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ u32 start_hw_next_to_use = 0, invalid_frag = 0;
-+ struct edma_txdesc_pri *txd = *txdesc;
-+ struct device *dev = ppe_dev->dev;
-+ struct sk_buff *iter_skb = NULL;
-+ u32 buf_len = 0, num_descs = 0;
-+ u32 num_sg_frag_list = 0;
-+
-+ /* Head skb processed already. */
-+ num_descs++;
-+
-+ if (unlikely(skb_has_frag_list(skb))) {
-+ struct edma_txdesc_pri *start_desc = NULL;
-+ u32 start_idx = 0, end_idx = 0;
-+
-+ /* Hold onto the index mapped to txd.
-+ * This will be the index previous to that of current *hw_next_to_use.
-+ */
-+ start_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK)
-+ & EDMA_TX_RING_SIZE_MASK);
-+ start_desc = txd;
-+ start_hw_next_to_use = *hw_next_to_use;
-+
-+ /* Walk through all fraglist skbs. */
-+ skb_walk_frags(skb, iter_skb) {
-+ dma_addr_t buff_addr;
-+ u32 num_nr_frag = 0;
-+
-+ /* This case could happen during the packet decapsulation.
-+ * All header content might be removed.
-+ */
-+ buf_len = skb_headlen(iter_skb);
-+ if (unlikely(buf_len == 0))
-+ goto skip_primary;
-+
-+ /* We make sure to flush this descriptor later. */
-+ EDMA_TXDESC_MORE_BIT_SET(txd, 1);
-+ EDMA_TXDESC_ENDIAN_SET(txd);
-+
-+ txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
-+ memset(txd, 0, sizeof(struct edma_txdesc_pri));
-+ buff_addr = dma_map_single(dev, iter_skb->data,
-+ buf_len, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, buff_addr)) {
-+ dev_dbg(dev, "Unable to dma for fraglist\n");
-+ goto dma_err;
-+ }
-+
-+ EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
-+ EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
-+ EDMA_TXDESC_POOL_ID_SET(txd, 0);
-+
-+ *hw_next_to_use = (*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK;
-+ num_descs += 1;
-+ num_sg_frag_list += 1;
-+
-+ /* skb fraglist skb can have nr_frags. */
-+skip_primary:
-+ if (unlikely(skb_shinfo(iter_skb)->nr_frags)) {
-+ num_nr_frag = edma_tx_skb_nr_frags(txdesc_ring, &txd,
-+ iter_skb, hw_next_to_use,
-+ &invalid_frag);
-+ if (unlikely(!num_nr_frag)) {
-+ dev_dbg(dev, "No descriptor available for ring %d\n",
-+ txdesc_ring->id);
-+ edma_tx_dma_unmap_frags(iter_skb, invalid_frag);
-+ goto dma_err;
-+ }
-+
-+ num_descs += num_nr_frag;
-+ num_sg_frag_list += num_nr_frag;
-+
-+ /* Update fraglist with nr_frag stats. */
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_fraglist_with_nr_frags_pkts++;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+ }
-+
-+ EDMA_TXDESC_ENDIAN_SET(txd);
-+
-+ /* This will be the index previous to
-+ * that of current *hw_next_to_use.
-+ */
-+ end_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK) &
-+ EDMA_TX_RING_SIZE_MASK);
-+
-+ /* Update frag_list stats. */
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_fraglist_pkts++;
-+ u64_stats_update_end(&stats->syncp);
-+ } else {
-+ /* Process skb with nr_frags. */
-+ num_descs += edma_tx_skb_nr_frags(txdesc_ring, &txd, skb,
-+ hw_next_to_use, &invalid_frag);
-+ if (unlikely(!num_descs)) {
-+ dev_dbg(dev, "No descriptor available for ring %d\n", txdesc_ring->id);
-+ edma_tx_dma_unmap_frags(skb, invalid_frag);
-+ *txdesc = NULL;
-+ return num_descs;
-+ }
-+
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_nr_frag_pkts++;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ dev_dbg(dev, "skb:%p num_descs_filled: %u, nr_frags %u, frag_list fragments %u\n",
-+ skb, num_descs, skb_shinfo(skb)->nr_frags, num_sg_frag_list);
-+
-+ *txdesc = txd;
-+
-+ return num_descs;
-+
-+dma_err:
-+ if (!num_sg_frag_list)
-+ goto reset_state;
-+
-+ edma_tx_handle_dma_err(skb, num_sg_frag_list);
-+
-+reset_state:
-+ *hw_next_to_use = start_hw_next_to_use;
-+ *txdesc = NULL;
-+
-+ return 0;
-+}
-+
-+static u32 edma_tx_avail_desc(struct edma_txdesc_ring *txdesc_ring,
-+ u32 hw_next_to_use)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ u32 data = 0, avail = 0, hw_next_to_clean = 0;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id);
-+ regmap_read(regmap, reg, &data);
-+ hw_next_to_clean = data & EDMA_TXDESC_CONS_IDX_MASK;
-+
-+ avail = EDMA_DESC_AVAIL_COUNT(hw_next_to_clean - 1,
-+ hw_next_to_use, EDMA_TX_RING_SIZE);
-+
-+ return avail;
-+}
-+
-+/**
-+ * edma_tx_ring_xmit - Transmit a packet.
-+ * @netdev: Netdevice.
-+ * @skb: Socket Buffer.
-+ * @txdesc_ring: Tx Descriptor ring.
-+ * @stats: EDMA Tx Statistics.
-+ *
-+ * Check for available descriptors, fill the descriptors
-+ * and transmit both linear and non linear packets.
-+ *
-+ * Return 0 on success, negative error code on failure.
-+ */
-+enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
-+ struct sk_buff *skb, struct edma_txdesc_ring *txdesc_ring,
-+ struct edma_port_tx_stats *stats)
-+{
-+ struct edma_txdesc_stats *txdesc_stats = &txdesc_ring->txdesc_stats;
-+ struct edma_port_priv *port_priv = netdev_priv(netdev);
-+ u32 num_tx_desc_needed = 0, num_desc_filled = 0;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct ppe_port *port = port_priv->ppe_port;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ struct edma_txdesc_pri *txdesc = NULL;
-+ struct device *dev = ppe_dev->dev;
-+ int port_id = port->port_id;
-+ u32 hw_next_to_use = 0;
-+ u32 reg;
-+
-+ hw_next_to_use = txdesc_ring->prod_idx;
-+
-+ if (unlikely(!(txdesc_ring->avail_desc))) {
-+ txdesc_ring->avail_desc = edma_tx_avail_desc(txdesc_ring,
-+ hw_next_to_use);
-+ if (unlikely(!txdesc_ring->avail_desc)) {
-+ netdev_dbg(netdev, "No available descriptors are present at %d ring\n",
-+ txdesc_ring->id);
-+
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->no_desc_avail;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+ return EDMA_TX_FAIL_NO_DESC;
-+ }
-+ }
-+
-+ /* Process head skb for linear skb.
-+ * Process head skb + nr_frags + fraglist for non linear skb.
-+ */
-+ if (likely(!skb_is_nonlinear(skb))) {
-+ txdesc = edma_tx_skb_first_desc(port_priv, txdesc_ring, skb,
-+ &hw_next_to_use, stats);
-+ if (unlikely(!txdesc)) {
-+ netdev_dbg(netdev, "No descriptor available for ring %d\n",
-+ txdesc_ring->id);
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->no_desc_avail;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+ return EDMA_TX_FAIL_NO_DESC;
-+ }
-+
-+ EDMA_TXDESC_ENDIAN_SET(txdesc);
-+ num_desc_filled++;
-+ } else {
-+ num_tx_desc_needed = edma_tx_num_descs_for_sg(skb);
-+
-+ /* HW does not support TSO for packets with more than 32 segments.
-+ * HW hangs up if it sees more than 32 segments. Kernel Perform GSO
-+ * for such packets with netdev gso_max_segs set to 32.
-+ */
-+ if (unlikely(num_tx_desc_needed > EDMA_TX_TSO_SEG_MAX)) {
-+ netdev_dbg(netdev, "Number of segments %u more than %u for %d ring\n",
-+ num_tx_desc_needed, EDMA_TX_TSO_SEG_MAX, txdesc_ring->id);
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->tso_max_seg_exceed;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_tso_drop_pkts++;
-+ u64_stats_update_end(&stats->syncp);
-+
-+ return EDMA_TX_FAIL;
-+ }
-+
-+ if (unlikely(num_tx_desc_needed > txdesc_ring->avail_desc)) {
-+ txdesc_ring->avail_desc = edma_tx_avail_desc(txdesc_ring,
-+ hw_next_to_use);
-+ if (num_tx_desc_needed > txdesc_ring->avail_desc) {
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->no_desc_avail;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+ netdev_dbg(netdev, "Not enough available descriptors are present at %d ring for SG packet. Needed %d, currently available %d\n",
-+ txdesc_ring->id, num_tx_desc_needed,
-+ txdesc_ring->avail_desc);
-+ return EDMA_TX_FAIL_NO_DESC;
-+ }
-+ }
-+
-+ txdesc = edma_tx_skb_first_desc(port_priv, txdesc_ring, skb,
-+ &hw_next_to_use, stats);
-+ if (unlikely(!txdesc)) {
-+ netdev_dbg(netdev, "No non-linear descriptor available for ring %d\n",
-+ txdesc_ring->id);
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->no_desc_avail;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+ return EDMA_TX_FAIL_NO_DESC;
-+ }
-+
-+ num_desc_filled = edma_tx_skb_sg_fill_desc(txdesc_ring,
-+ &txdesc, skb, &hw_next_to_use, stats);
-+ if (unlikely(!txdesc)) {
-+ netdev_dbg(netdev, "No descriptor available for ring %d\n",
-+ txdesc_ring->id);
-+ dma_unmap_single(dev, virt_to_phys(skb->data),
-+ skb->len, DMA_TO_DEVICE);
-+ u64_stats_update_begin(&txdesc_stats->syncp);
-+ ++txdesc_stats->no_desc_avail;
-+ u64_stats_update_end(&txdesc_stats->syncp);
-+ return EDMA_TX_FAIL_NO_DESC;
-+ }
-+ }
-+
-+ /* Set the skb pointer to the descriptor's opaque field/s
-+ * on the last descriptor of the packet/SG packet.
-+ */
-+ EDMA_TXDESC_OPAQUE_SET(txdesc, skb);
-+
-+ /* Update producer index. */
-+ txdesc_ring->prod_idx = hw_next_to_use & EDMA_TXDESC_PROD_IDX_MASK;
-+ txdesc_ring->avail_desc -= num_desc_filled;
-+
-+ netdev_dbg(netdev, "%s: skb:%p tx_ring:%u proto:0x%x skb->len:%d\n port:%u prod_idx:%u ip_summed:0x%x\n",
-+ netdev->name, skb, txdesc_ring->id, ntohs(skb->protocol),
-+ skb->len, port_id, hw_next_to_use, skb->ip_summed);
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
-+ regmap_write(regmap, reg, txdesc_ring->prod_idx);
-+
-+ u64_stats_update_begin(&stats->syncp);
-+ stats->tx_pkts++;
-+ stats->tx_bytes += skb->len;
-+ u64_stats_update_end(&stats->syncp);
-+
-+ return EDMA_TX_OK;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
-@@ -0,0 +1,302 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef __EDMA_TX__
-+#define __EDMA_TX__
-+
-+#include "edma_port.h"
-+
-+#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[(i)]))
-+#define EDMA_GET_PDESC(R, i, type) (&(((type *)((R)->pdesc))[(i)]))
-+#define EDMA_GET_SDESC(R, i, type) (&(((type *)((R)->sdesc))[(i)]))
-+#define EDMA_TXCMPL_DESC(R, i) EDMA_GET_DESC(R, i, \
-+ struct edma_txcmpl_desc)
-+#define EDMA_TXDESC_PRI_DESC(R, i) EDMA_GET_PDESC(R, i, \
-+ struct edma_txdesc_pri)
-+#define EDMA_TXDESC_SEC_DESC(R, i) EDMA_GET_SDESC(R, i, \
-+ struct edma_txdesc_sec)
-+
-+#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
-+ typeof(_max) (max) = (_max); \
-+ ((((head) - (tail)) + \
-+ (max)) & ((max) - 1)); })
-+
-+#define EDMA_TX_RING_SIZE 2048
-+#define EDMA_TX_RING_SIZE_MASK (EDMA_TX_RING_SIZE - 1)
-+
-+/* Max segment processing capacity of HW for TSO. */
-+#define EDMA_TX_TSO_SEG_MAX 32
-+
-+/* HW defined low and high MSS size. */
-+#define EDMA_TX_TSO_MSS_MIN 256
-+#define EDMA_TX_TSO_MSS_MAX 10240
-+
-+#define EDMA_DST_PORT_TYPE 2
-+#define EDMA_DST_PORT_TYPE_SHIFT 28
-+#define EDMA_DST_PORT_TYPE_MASK (0xf << EDMA_DST_PORT_TYPE_SHIFT)
-+#define EDMA_DST_PORT_ID_SHIFT 16
-+#define EDMA_DST_PORT_ID_MASK (0xfff << EDMA_DST_PORT_ID_SHIFT)
-+
-+#define EDMA_DST_PORT_TYPE_SET(x) (((x) << EDMA_DST_PORT_TYPE_SHIFT) & \
-+ EDMA_DST_PORT_TYPE_MASK)
-+#define EDMA_DST_PORT_ID_SET(x) (((x) << EDMA_DST_PORT_ID_SHIFT) & \
-+ EDMA_DST_PORT_ID_MASK)
-+#define EDMA_DST_INFO_SET(desc, x) ((desc)->word4 |= \
-+ (EDMA_DST_PORT_TYPE_SET(EDMA_DST_PORT_TYPE) | EDMA_DST_PORT_ID_SET(x)))
-+
-+#define EDMA_TXDESC_TSO_ENABLE_MASK BIT(24)
-+#define EDMA_TXDESC_TSO_ENABLE_SET(desc, x) ((desc)->word5 |= \
-+ FIELD_PREP(EDMA_TXDESC_TSO_ENABLE_MASK, x))
-+#define EDMA_TXDESC_MSS_MASK GENMASK(31, 16)
-+#define EDMA_TXDESC_MSS_SET(desc, x) ((desc)->word6 |= \
-+ FIELD_PREP(EDMA_TXDESC_MSS_MASK, x))
-+#define EDMA_TXDESC_MORE_BIT_MASK BIT(30)
-+#define EDMA_TXDESC_MORE_BIT_SET(desc, x) ((desc)->word1 |= \
-+ FIELD_PREP(EDMA_TXDESC_MORE_BIT_MASK, x))
-+
-+#define EDMA_TXDESC_ADV_OFFSET_BIT BIT(31)
-+#define EDMA_TXDESC_ADV_OFFLOAD_SET(desc) ((desc)->word5 |= \
-+ FIELD_PREP(EDMA_TXDESC_ADV_OFFSET_BIT, 1))
-+#define EDMA_TXDESC_IP_CSUM_BIT BIT(25)
-+#define EDMA_TXDESC_IP_CSUM_SET(desc) ((desc)->word5 |= \
-+ FIELD_PREP(EDMA_TXDESC_IP_CSUM_BIT, 1))
-+
-+#define EDMA_TXDESC_L4_CSUM_SET_MASK GENMASK(27, 26)
-+#define EDMA_TXDESC_L4_CSUM_SET(desc) ((desc)->word5 |= \
-+ (FIELD_PREP(EDMA_TXDESC_L4_CSUM_SET_MASK, 1)))
-+
-+#define EDMA_TXDESC_POOL_ID_SET_MASK GENMASK(24, 18)
-+#define EDMA_TXDESC_POOL_ID_SET(desc, x) ((desc)->word5 |= \
-+ (FIELD_PREP(EDMA_TXDESC_POOL_ID_SET_MASK, x)))
-+
-+#define EDMA_TXDESC_DATA_LEN_SET(desc, x) ((desc)->word5 |= ((x) & 0x1ffff))
-+#define EDMA_TXDESC_SERVICE_CODE_MASK GENMASK(24, 16)
-+#define EDMA_TXDESC_SERVICE_CODE_SET(desc, x) ((desc)->word1 |= \
-+ (FIELD_PREP(EDMA_TXDESC_SERVICE_CODE_MASK, x)))
-+#define EDMA_TXDESC_BUFFER_ADDR_SET(desc, addr) (((desc)->word0) = (addr))
-+
-+#ifdef __LP64__
-+#define EDMA_TXDESC_OPAQUE_GET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ (((u64)(desc)->word3 << 32) | (desc)->word2); })
-+
-+#define EDMA_TXCMPL_OPAQUE_GET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ (((u64)(desc)->word1 << 32) | \
-+ (desc)->word0); })
-+
-+#define EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr) ((desc)->word2 = \
-+ (u32)(uintptr_t)(ptr))
-+
-+#define EDMA_TXDESC_OPAQUE_HI_SET(desc, ptr) ((desc)->word3 = \
-+ (u32)((u64)(ptr) >> 32))
-+
-+#define EDMA_TXDESC_OPAQUE_SET(_desc, _ptr) do { \
-+ typeof(_desc) (desc) = (_desc); \
-+ typeof(_ptr) (ptr) = (_ptr); \
-+ EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr); \
-+ EDMA_TXDESC_OPAQUE_HI_SET(desc, ptr); \
-+} while (0)
-+#else
-+#define EDMA_TXCMPL_OPAQUE_GET(desc) ((desc)->word0)
-+#define EDMA_TXDESC_OPAQUE_GET(desc) ((desc)->word2)
-+#define EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr) ((desc)->word2 = (u32)(uintptr_t)ptr)
-+
-+#define EDMA_TXDESC_OPAQUE_SET(desc, ptr) \
-+ EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr)
-+#endif
-+#define EDMA_TXCMPL_MORE_BIT_MASK BIT(30)
-+
-+#define EDMA_TXCMPL_MORE_BIT_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word2))) & \
-+ EDMA_TXCMPL_MORE_BIT_MASK)
-+
-+#define EDMA_TXCOMP_RING_ERROR_MASK GENMASK(22, 0)
-+
-+#define EDMA_TXCOMP_RING_ERROR_GET(x) ((le32_to_cpu((__force __le32)x)) & \
-+ EDMA_TXCOMP_RING_ERROR_MASK)
-+
-+#define EDMA_TXCOMP_POOL_ID_MASK GENMASK(5, 0)
-+
-+#define EDMA_TXCOMP_POOL_ID_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word2))) & \
-+ EDMA_TXCOMP_POOL_ID_MASK)
-+
-+/* Opaque values are set in word2 and word3,
-+ * they are not accessed by the EDMA HW,
-+ * so endianness conversion is not needed.
-+ */
-+#define EDMA_TXDESC_ENDIAN_SET(_desc) ({ \
-+ typeof(_desc) (desc) = (_desc); \
-+ cpu_to_le32s(&((desc)->word0)); \
-+ cpu_to_le32s(&((desc)->word1)); \
-+ cpu_to_le32s(&((desc)->word4)); \
-+ cpu_to_le32s(&((desc)->word5)); \
-+ cpu_to_le32s(&((desc)->word6)); \
-+ cpu_to_le32s(&((desc)->word7)); \
-+})
-+
-+/* EDMA Tx GSO status */
-+enum edma_tx_status {
-+ EDMA_TX_OK = 0, /* Tx success. */
-+ EDMA_TX_FAIL_NO_DESC = 1, /* Not enough descriptors. */
-+ EDMA_TX_FAIL = 2, /* Tx failure. */
-+};
-+
-+/* EDMA TX GSO status */
-+enum edma_tx_gso_status {
-+ EDMA_TX_GSO_NOT_NEEDED = 0,
-+ /* Packet has segment count less than TX_TSO_SEG_MAX. */
-+ EDMA_TX_GSO_SUCCEED = 1,
-+ /* GSO Succeed. */
-+ EDMA_TX_GSO_FAIL = 2,
-+ /* GSO failed, drop the packet. */
-+};
-+
-+/**
-+ * struct edma_txcmpl_stats - EDMA TX complete ring statistics.
-+ * @invalid_buffer: Invalid buffer address received.
-+ * @errors: Other Tx complete descriptor errors indicated by the hardware.
-+ * @desc_with_more_bit: Packet's segment transmit count.
-+ * @no_pending_desc: No descriptor is pending for processing.
-+ * @syncp: Synchronization pointer.
-+ */
-+struct edma_txcmpl_stats {
-+ u64 invalid_buffer;
-+ u64 errors;
-+ u64 desc_with_more_bit;
-+ u64 no_pending_desc;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
-+ * struct edma_txdesc_stats - EDMA Tx descriptor ring statistics.
-+ * @no_desc_avail: No descriptor available to transmit.
-+ * @tso_max_seg_exceed: Packets extending EDMA_TX_TSO_SEG_MAX segments.
-+ * @syncp: Synchronization pointer.
-+ */
-+struct edma_txdesc_stats {
-+ u64 no_desc_avail;
-+ u64 tso_max_seg_exceed;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
-+ * struct edma_txdesc_pri - EDMA primary TX descriptor.
-+ * @word0: Low 32-bit of buffer address.
-+ * @word1: Buffer recycling, PTP tag flag, PRI valid flag.
-+ * @word2: Low 32-bit of opaque value.
-+ * @word3: High 32-bit of opaque value.
-+ * @word4: Source/Destination port info.
-+ * @word5: VLAN offload, csum mode, ip_csum_en, tso_en, data len.
-+ * @word6: MSS/hash_value/PTP tag, data offset.
-+ * @word7: L4/L3 offset, PROT type, L2 type, CVLAN/SVLAN tag, service code.
-+ */
-+struct edma_txdesc_pri {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+ u32 word4;
-+ u32 word5;
-+ u32 word6;
-+ u32 word7;
-+};
-+
-+/**
-+ * struct edma_txdesc_sec - EDMA secondary TX descriptor.
-+ * @word0: Reserved.
-+ * @word1: Custom csum offset, payload offset, TTL/NAT action.
-+ * @word2: NAPT translated port, DSCP value, TTL value.
-+ * @word3: Flow index value and valid flag.
-+ * @word4: Reserved.
-+ * @word5: Reserved.
-+ * @word6: CVLAN/SVLAN command.
-+ * @word7: CVLAN/SVLAN tag value.
-+ */
-+struct edma_txdesc_sec {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+ u32 word4;
-+ u32 word5;
-+ u32 word6;
-+ u32 word7;
-+};
-+
-+/**
-+ * struct edma_txcmpl_desc - EDMA TX complete descriptor.
-+ * @word0: Low 32-bit opaque value.
-+ * @word1: High 32-bit opaque value.
-+ * @word2: More fragment, transmit ring id, pool id.
-+ * @word3: Error indications.
-+ */
-+struct edma_txcmpl_desc {
-+ u32 word0;
-+ u32 word1;
-+ u32 word2;
-+ u32 word3;
-+};
-+
-+/**
-+ * struct edma_txdesc_ring - EDMA TX descriptor ring
-+ * @prod_idx: Producer index
-+ * @id: Tx ring number
-+ * @avail_desc: Number of available descriptor to process
-+ * @pdesc: Primary descriptor ring virtual address
-+ * @pdma: Primary descriptor ring physical address
-+ * @sdesc: Secondary descriptor ring virtual address
-+ * @tx_desc_stats: Tx descriptor ring statistics
-+ * @sdma: Secondary descriptor ring physical address
-+ * @count: Number of descriptors
-+ * @fc_grp_id: Flow control group ID
-+ */
-+struct edma_txdesc_ring {
-+ u32 prod_idx;
-+ u32 id;
-+ u32 avail_desc;
-+ struct edma_txdesc_pri *pdesc;
-+ dma_addr_t pdma;
-+ struct edma_txdesc_sec *sdesc;
-+ struct edma_txdesc_stats txdesc_stats;
-+ dma_addr_t sdma;
-+ u32 count;
-+ u8 fc_grp_id;
-+};
-+
-+/**
-+ * struct edma_txcmpl_ring - EDMA TX complete ring
-+ * @napi: NAPI
-+ * @cons_idx: Consumer index
-+ * @avail_pkt: Number of available packets to process
-+ * @desc: Descriptor ring virtual address
-+ * @id: Txcmpl ring number
-+ * @tx_cmpl_stats: Tx complete ring statistics
-+ * @dma: Descriptor ring physical address
-+ * @count: Number of descriptors in the ring
-+ * @napi_added: Flag to indicate NAPI add status
-+ */
-+struct edma_txcmpl_ring {
-+ struct napi_struct napi;
-+ u32 cons_idx;
-+ u32 avail_pkt;
-+ struct edma_txcmpl_desc *desc;
-+ u32 id;
-+ struct edma_txcmpl_stats txcmpl_stats;
-+ dma_addr_t dma;
-+ u32 count;
-+ bool napi_added;
-+};
-+
-+enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
-+ struct sk_buff *skb,
-+ struct edma_txdesc_ring *txdesc_ring,
-+ struct edma_port_tx_stats *stats);
-+u32 edma_tx_complete(u32 work_to_do,
-+ struct edma_txcmpl_ring *txcmpl_ring);
-+irqreturn_t edma_tx_handle_irq(int irq, void *ctx);
-+int edma_tx_napi_poll(struct napi_struct *napi, int budget);
-+enum edma_tx_gso_status edma_tx_gso_segment(struct sk_buff *skb,
-+ struct net_device *netdev, struct sk_buff **segs);
-+
-+#endif
+++ /dev/null
-From 4dfbbaa1e9ab01f1126c9e7a89583aad0b6600da Mon Sep 17 00:00:00 2001
-From: Suruchi Agarwal <quic_suruchia@quicinc.com>
-Date: Thu, 21 Mar 2024 16:31:04 -0700
-Subject: [PATCH 42/50] net: ethernet: qualcomm: Add miscellaneous error
- interrupts and counters
-
-Miscellaneous error interrupts, EDMA Tx/Rx and error counters are supported
-using debugfs framework.
-
-Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
-Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
-Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.c | 162 ++++++++
- drivers/net/ethernet/qualcomm/ppe/edma.h | 31 +-
- .../net/ethernet/qualcomm/ppe/edma_debugfs.c | 370 ++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/ppe_debugfs.c | 17 +
- 5 files changed, 580 insertions(+), 2 deletions(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-
- #EDMA
--qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
-+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
---- a/drivers/net/ethernet/qualcomm/ppe/edma.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -152,6 +152,42 @@ static int edma_clock_init(void)
- }
-
- /**
-+ * edma_err_stats_alloc - Allocate stats memory
-+ *
-+ * Allocate memory for per-CPU error stats.
-+ */
-+int edma_err_stats_alloc(void)
-+{
-+ u32 i;
-+
-+ edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
-+ if (!edma_ctx->err_stats)
-+ return -ENOMEM;
-+
-+ for_each_possible_cpu(i) {
-+ struct edma_err_stats *stats;
-+
-+ stats = per_cpu_ptr(edma_ctx->err_stats, i);
-+ u64_stats_init(&stats->syncp);
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * edma_err_stats_free - Free stats memory
-+ *
-+ * Free memory of per-CPU error stats.
-+ */
-+void edma_err_stats_free(void)
-+{
-+ if (edma_ctx->err_stats) {
-+ free_percpu(edma_ctx->err_stats);
-+ edma_ctx->err_stats = NULL;
-+ }
-+}
-+
-+/**
- * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
- *
- * Map int_priority values to priority class and initialize
-@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map
- return ret;
- }
-
-+static void edma_disable_misc_interrupt(void)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
-+ regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
-+}
-+
-+static void edma_enable_misc_interrupt(void)
-+{
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 reg;
-+
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
-+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
-+}
-+
-+static irqreturn_t edma_misc_handle_irq(int irq,
-+ __maybe_unused void *ctx)
-+{
-+ struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
-+ struct regmap *regmap = ppe_dev->regmap;
-+ u32 misc_intr_status, data, reg;
-+
-+ /* Read Misc intr status */
-+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
-+ regmap_read(regmap, reg, &data);
-+ misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
-+
-+ pr_debug("Received misc irq %d, status: %d\n", irq, misc_intr_status);
-+
-+ if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
-+ pr_err("MISC AXI read error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_axi_read_err;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
-+ pr_err("MISC AXI write error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_axi_write_err;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC Rx descriptor fifo full error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_rxdesc_fifo_full;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC Rx buffer size error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_rx_buf_size_err;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC Tx SRAM full error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_tx_sram_full;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC Tx complete buffer full error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_txcmpl_buf_full;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC data length error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_tx_data_len_err;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
-+ if (net_ratelimit())
-+ pr_err("MISC Tx timeout error received\n");
-+ u64_stats_update_begin(&stats->syncp);
-+ ++stats->edma_tx_timeout;
-+ u64_stats_update_end(&stats->syncp);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
- static int edma_irq_register(void)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
- struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
- struct edma_ring_info *rx = hw_info->rx;
-+ struct device *dev = ppe_dev->dev;
- int ret;
- u32 i;
-
-@@ -270,8 +408,25 @@ static int edma_irq_register(void)
- edma_rxdesc_irq_name[i]);
- }
-
-+ /* Request Misc IRQ */
-+ ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
-+ IRQF_SHARED, "edma_misc",
-+ (void *)dev);
-+ if (ret) {
-+ pr_err("MISC IRQ:%d request failed\n",
-+ edma_ctx->intr_info.intr_misc);
-+ goto misc_intr_req_fail;
-+ }
-+
- return 0;
-
-+misc_intr_req_fail:
-+ /* Free IRQ for RXDESC rings */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
-+ free_irq(edma_ctx->intr_info.intr_rx[i],
-+ (void *)&edma_ctx->rx_rings[i]);
-+ }
- rx_desc_ring_intr_req_fail:
- for (i = 0; i < rx->num_rings; i++)
- kfree(edma_rxdesc_irq_name[i]);
-@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
- edma_cfg_tx_disable_interrupts(i);
-
- edma_cfg_rx_disable_interrupts();
-+ edma_disable_misc_interrupt();
-
- edma_cfg_rx_rings_disable();
-
-@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe
- edma_cfg_tx_disable_interrupts(i);
-
- edma_cfg_rx_disable_interrupts();
-+ edma_disable_misc_interrupt();
-
- /* Free IRQ for TXCMPL rings. */
- for (i = 0; i < txcmpl->num_rings; i++) {
-@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe
- }
- kfree(edma_rxdesc_irq_name);
-
-+ /* Free Misc IRQ */
-+ synchronize_irq(edma_ctx->intr_info.intr_misc);
-+ free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
-+
- kfree(edma_ctx->intr_info.intr_rx);
- kfree(edma_ctx->intr_info.intr_txcmpl);
-
-@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_de
- }
-
- edma_cfg_rx_enable_interrupts();
-+ edma_enable_misc_interrupt();
-
- dev_info(dev, "EDMA configuration successful\n");
-
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -37,6 +37,30 @@
- (max)) & ((max) - 1)); })
-
- /**
-+ * struct edma_err_stats - EDMA error stats
-+ * @edma_axi_read_err: AXI read error
-+ * @edma_axi_write_err: AXI write error
-+ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
-+ * @edma_rx_buf_size_err: Rx buffer size too small error
-+ * @edma_tx_sram_full: Tx packet SRAM buffer full error
-+ * @edma_tx_data_len_err: Tx data length error
-+ * @edma_tx_timeout: Tx timeout error
-+ * @edma_txcmpl_buf_full: Tx completion buffer full error
-+ * @syncp: Synchronization pointer
-+ */
-+struct edma_err_stats {
-+ u64 edma_axi_read_err;
-+ u64 edma_axi_write_err;
-+ u64 edma_rxdesc_fifo_full;
-+ u64 edma_rx_buf_size_err;
-+ u64 edma_tx_sram_full;
-+ u64 edma_tx_data_len_err;
-+ u64 edma_tx_timeout;
-+ u64 edma_txcmpl_buf_full;
-+ struct u64_stats_sync syncp;
-+};
-+
-+/**
- * struct edma_ring_info - EDMA ring data structure.
- * @max_rings: Maximum number of rings
- * @ring_start: Ring start ID
-@@ -97,6 +121,7 @@ struct edma_intr_info {
- * @rx_rings: Rx Desc Rings, SW is consumer
- * @tx_rings: Tx Descriptor Ring, SW is producer
- * @txcmpl_rings: Tx complete Ring, SW is consumer
-+ * @err_stats: Per CPU error statistics
- * @rx_page_mode: Page mode enabled or disabled
- * @rx_buf_size: Rx buffer size for Jumbo MRU
- * @tx_requeue_stop: Tx requeue stop enabled or disabled
-@@ -111,6 +136,7 @@ struct edma_context {
- struct edma_rxdesc_ring *rx_rings;
- struct edma_txdesc_ring *tx_rings;
- struct edma_txcmpl_ring *txcmpl_rings;
-+ struct edma_err_stats __percpu *err_stats;
- u32 rx_page_mode;
- u32 rx_buf_size;
- bool tx_requeue_stop;
-@@ -119,7 +145,10 @@ struct edma_context {
- /* Global EDMA context */
- extern struct edma_context *edma_ctx;
-
-+int edma_err_stats_alloc(void);
-+void edma_err_stats_free(void);
- void edma_destroy(struct ppe_device *ppe_dev);
- int edma_setup(struct ppe_device *ppe_dev);
--
-+void edma_debugfs_teardown(void);
-+int edma_debugfs_setup(struct ppe_device *ppe_dev);
- #endif
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
-@@ -0,0 +1,370 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* EDMA debugfs routines for display of Tx/Rx counters. */
-+
-+#include <linux/cpumask.h>
-+#include <linux/debugfs.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/printk.h>
-+
-+#include "edma.h"
-+
-+#define EDMA_STATS_BANNER_MAX_LEN 80
-+#define EDMA_RX_RING_STATS_NODE_NAME "EDMA_RX"
-+#define EDMA_TX_RING_STATS_NODE_NAME "EDMA_TX"
-+#define EDMA_ERR_STATS_NODE_NAME "EDMA_ERR"
-+
-+static struct dentry *edma_dentry;
-+static struct dentry *stats_dentry;
-+
-+static void edma_debugfs_print_banner(struct seq_file *m, char *node)
-+{
-+ u32 banner_char_len, i;
-+
-+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
-+ seq_puts(m, "_");
-+ banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
-+ seq_puts(m, "\n\n");
-+
-+ for (i = 0; i < banner_char_len; i++)
-+ seq_puts(m, "<");
-+ seq_printf(m, " %s ", node);
-+
-+ for (i = 0; i < banner_char_len; i++)
-+ seq_puts(m, ">");
-+ seq_puts(m, "\n");
-+
-+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
-+ seq_puts(m, "_");
-+ seq_puts(m, "\n\n");
-+}
-+
-+static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
-+ void __maybe_unused *p)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *rxfill = hw_info->rxfill;
-+ struct edma_rxfill_stats *rxfill_stats;
-+ struct edma_rxdesc_stats *rxdesc_stats;
-+ struct edma_ring_info *rx = hw_info->rx;
-+ unsigned int start;
-+ u32 i;
-+
-+ rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
-+ if (!rxfill_stats)
-+ return -ENOMEM;
-+
-+ rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
-+ if (!rxdesc_stats) {
-+ kfree(rxfill_stats);
-+ return -ENOMEM;
-+ }
-+
-+ /* Get stats for Rx fill rings. */
-+ for (i = 0; i < rxfill->num_rings; i++) {
-+ struct edma_rxfill_ring *rxfill_ring;
-+ struct edma_rxfill_stats *stats;
-+
-+ rxfill_ring = &edma_ctx->rxfill_rings[i];
-+ stats = &rxfill_ring->rxfill_stats;
-+ do {
-+ start = u64_stats_fetch_begin(&stats->syncp);
-+ rxfill_stats[i].alloc_failed = stats->alloc_failed;
-+ rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-+ }
-+
-+ /* Get stats for Rx Desc rings. */
-+ for (i = 0; i < rx->num_rings; i++) {
-+ struct edma_rxdesc_ring *rxdesc_ring;
-+ struct edma_rxdesc_stats *stats;
-+
-+ rxdesc_ring = &edma_ctx->rx_rings[i];
-+ stats = &rxdesc_ring->rxdesc_stats;
-+ do {
-+ start = u64_stats_fetch_begin(&stats->syncp);
-+ rxdesc_stats[i].src_port_inval = stats->src_port_inval;
-+ rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
-+ rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-+ }
-+
-+ edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
-+
-+ seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
-+ for (i = 0; i < rx->num_rings; i++) {
-+ seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
-+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
-+ i + rx->ring_start, rxdesc_stats[i].src_port_inval);
-+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
-+ i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
-+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
-+ i + rx->ring_start,
-+ rxdesc_stats[i].src_port_inval_netdev);
-+ seq_puts(m, "\n");
-+ }
-+
-+ seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
-+ for (i = 0; i < rxfill->num_rings; i++) {
-+ seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
-+ seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
-+ i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
-+ seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
-+ i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
-+ seq_puts(m, "\n");
-+ }
-+
-+ kfree(rxfill_stats);
-+ kfree(rxdesc_stats);
-+ return 0;
-+}
-+
-+static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
-+ void __maybe_unused *p)
-+{
-+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
-+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
-+ struct edma_ring_info *tx = hw_info->tx;
-+ struct edma_txcmpl_stats *txcmpl_stats;
-+ struct edma_txdesc_stats *txdesc_stats;
-+ unsigned int start;
-+ u32 i;
-+
-+ txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
-+ if (!txcmpl_stats)
-+ return -ENOMEM;
-+
-+ txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
-+ if (!txdesc_stats) {
-+ kfree(txcmpl_stats);
-+ return -ENOMEM;
-+ }
-+
-+ /* Get stats for Tx desc rings. */
-+ for (i = 0; i < tx->num_rings; i++) {
-+ struct edma_txdesc_ring *txdesc_ring;
-+ struct edma_txdesc_stats *stats;
-+
-+ txdesc_ring = &edma_ctx->tx_rings[i];
-+ stats = &txdesc_ring->txdesc_stats;
-+ do {
-+ start = u64_stats_fetch_begin(&stats->syncp);
-+ txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
-+ txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-+ }
-+
-+ /* Get stats for Tx Complete rings. */
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ struct edma_txcmpl_ring *txcmpl_ring;
-+ struct edma_txcmpl_stats *stats;
-+
-+ txcmpl_ring = &edma_ctx->txcmpl_rings[i];
-+ stats = &txcmpl_ring->txcmpl_stats;
-+ do {
-+ start = u64_stats_fetch_begin(&stats->syncp);
-+ txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
-+ txcmpl_stats[i].errors = stats->errors;
-+ txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
-+ txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-+ }
-+
-+ edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
-+
-+ seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
-+ for (i = 0; i < txcmpl->num_rings; i++) {
-+ seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
-+ seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
-+ i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
-+ seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
-+ i + txcmpl->ring_start, txcmpl_stats[i].errors);
-+ seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
-+ i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
-+ seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
-+ i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
-+ seq_puts(m, "\n");
-+ }
-+
-+ seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
-+ for (i = 0; i < tx->num_rings; i++) {
-+ seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
-+ seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
-+ i + tx->ring_start, txdesc_stats[i].no_desc_avail);
-+ seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
-+ i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
-+ seq_puts(m, "\n");
-+ }
-+
-+ kfree(txcmpl_stats);
-+ kfree(txdesc_stats);
-+ return 0;
-+}
-+
-+static int edma_debugfs_err_stats_show(struct seq_file *m,
-+ void __maybe_unused *p)
-+{
-+ struct edma_err_stats *err_stats, *pcpu_err_stats;
-+ unsigned int start;
-+ u32 cpu;
-+
-+ err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
-+ if (!err_stats)
-+ return -ENOMEM;
-+
-+ /* Get percpu EDMA miscellaneous stats. */
-+ for_each_possible_cpu(cpu) {
-+ pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
-+ do {
-+ start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
-+ err_stats->edma_axi_read_err +=
-+ pcpu_err_stats->edma_axi_read_err;
-+ err_stats->edma_axi_write_err +=
-+ pcpu_err_stats->edma_axi_write_err;
-+ err_stats->edma_rxdesc_fifo_full +=
-+ pcpu_err_stats->edma_rxdesc_fifo_full;
-+ err_stats->edma_rx_buf_size_err +=
-+ pcpu_err_stats->edma_rx_buf_size_err;
-+ err_stats->edma_tx_sram_full +=
-+ pcpu_err_stats->edma_tx_sram_full;
-+ err_stats->edma_tx_data_len_err +=
-+ pcpu_err_stats->edma_tx_data_len_err;
-+ err_stats->edma_tx_timeout +=
-+ pcpu_err_stats->edma_tx_timeout;
-+ err_stats->edma_txcmpl_buf_full +=
-+ pcpu_err_stats->edma_txcmpl_buf_full;
-+ } while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
-+ }
-+
-+ edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
-+
-+ seq_puts(m, "\n#EDMA error stats:\n\n");
-+ seq_printf(m, "\t\t axi read error = %llu\n",
-+ err_stats->edma_axi_read_err);
-+ seq_printf(m, "\t\t axi write error = %llu\n",
-+ err_stats->edma_axi_write_err);
-+ seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
-+ err_stats->edma_rxdesc_fifo_full);
-+ seq_printf(m, "\t\t Rx buffer size error = %llu\n",
-+ err_stats->edma_rx_buf_size_err);
-+ seq_printf(m, "\t\t Tx SRAM full = %llu\n",
-+ err_stats->edma_tx_sram_full);
-+ seq_printf(m, "\t\t Tx data length error = %llu\n",
-+ err_stats->edma_tx_data_len_err);
-+ seq_printf(m, "\t\t Tx timeout = %llu\n",
-+ err_stats->edma_tx_timeout);
-+ seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
-+ err_stats->edma_txcmpl_buf_full);
-+
-+ kfree(err_stats);
-+ return 0;
-+}
-+
-+static int edma_debugs_rx_rings_stats_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, edma_debugfs_rx_rings_stats_show,
-+ inode->i_private);
-+}
-+
-+static const struct file_operations edma_debugfs_rx_rings_file_ops = {
-+ .open = edma_debugs_rx_rings_stats_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release
-+};
-+
-+static int edma_debugs_tx_rings_stats_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
-+}
-+
-+static const struct file_operations edma_debugfs_tx_rings_file_ops = {
-+ .open = edma_debugs_tx_rings_stats_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release
-+};
-+
-+static int edma_debugs_err_stats_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
-+}
-+
-+static const struct file_operations edma_debugfs_misc_file_ops = {
-+ .open = edma_debugs_err_stats_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release
-+};
-+
-+/**
-+ * edma_debugfs_teardown - EDMA debugfs teardown.
-+ *
-+ * EDMA debugfs teardown and free stats memory.
-+ */
-+void edma_debugfs_teardown(void)
-+{
-+ /* Free EDMA miscellaneous stats memory */
-+ edma_err_stats_free();
-+
-+ debugfs_remove_recursive(edma_dentry);
-+ edma_dentry = NULL;
-+ stats_dentry = NULL;
-+}
-+
-+/**
-+ * edma_debugfs_setup - EDMA debugfs setup.
-+ * @ppe_dev: PPE Device
-+ *
-+ * EDMA debugfs setup.
-+ */
-+int edma_debugfs_setup(struct ppe_device *ppe_dev)
-+{
-+ edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
-+ if (!edma_dentry) {
-+ pr_err("Unable to create debugfs edma directory in debugfs\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ stats_dentry = debugfs_create_dir("stats", edma_dentry);
-+ if (!stats_dentry) {
-+ pr_err("Unable to create debugfs stats directory in debugfs\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ if (!debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
-+ NULL, &edma_debugfs_rx_rings_file_ops)) {
-+ pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ if (!debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
-+ NULL, &edma_debugfs_tx_rings_file_ops)) {
-+ pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ /* Allocate memory for EDMA miscellaneous stats */
-+ if (edma_err_stats_alloc() < 0) {
-+ pr_err("Unable to allocate miscellaneous percpu stats\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ if (!debugfs_create_file("err_stats", 0444, stats_dentry,
-+ NULL, &edma_debugfs_misc_file_ops)) {
-+ pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
-+ goto debugfs_dir_failed;
-+ }
-+
-+ return 0;
-+
-+debugfs_dir_failed:
-+ debugfs_remove_recursive(edma_dentry);
-+ edma_dentry = NULL;
-+ stats_dentry = NULL;
-+ return -ENOMEM;
-+}
---- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
-@@ -6,9 +6,11 @@
- /* PPE debugfs routines for display of PPE counters useful for debug. */
-
- #include <linux/debugfs.h>
-+#include <linux/netdevice.h>
- #include <linux/regmap.h>
- #include <linux/seq_file.h>
-
-+#include "edma.h"
- #include "ppe.h"
- #include "ppe_config.h"
- #include "ppe_debugfs.h"
-@@ -711,15 +713,30 @@ static const struct file_operations ppe_
-
- void ppe_debugfs_setup(struct ppe_device *ppe_dev)
- {
-+ int ret;
-+
- ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
- debugfs_create_file("packet_counter", 0444,
- ppe_dev->debugfs_root,
- ppe_dev,
- &ppe_debugfs_packet_counter_fops);
-+
-+ if (!ppe_dev->debugfs_root) {
-+ dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
-+ return;
-+ }
-+
-+ ret = edma_debugfs_setup(ppe_dev);
-+ if (ret) {
-+ dev_err(ppe_dev->dev, "Error in EDMA debugfs setup API. ret: %d\n", ret);
-+ debugfs_remove_recursive(ppe_dev->debugfs_root);
-+ ppe_dev->debugfs_root = NULL;
-+ }
- }
-
- void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
- {
-+ edma_debugfs_teardown();
- debugfs_remove_recursive(ppe_dev->debugfs_root);
- ppe_dev->debugfs_root = NULL;
- }
+++ /dev/null
-From ec30075badd13a3e2ffddd1c5dcb40e3c52202ed Mon Sep 17 00:00:00 2001
-From: Pavithra R <quic_pavir@quicinc.com>
-Date: Thu, 30 May 2024 20:46:36 +0530
-Subject: [PATCH 43/50] net: ethernet: qualcomm: Add ethtool support for EDMA
-
-ethtool ops can be used for EDMA netdevice configuration and statistics.
-
-Change-Id: I57fc19415dacbe51fed000520336463938220609
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
- drivers/net/ethernet/qualcomm/ppe/edma.h | 1 +
- .../net/ethernet/qualcomm/ppe/edma_ethtool.c | 294 ++++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 1 +
- 4 files changed, 297 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
-
---- a/drivers/net/ethernet/qualcomm/ppe/Makefile
-+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
-@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
- qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
-
- #EDMA
--qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
-+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o edma_ethtool.o
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -151,4 +151,5 @@ void edma_destroy(struct ppe_device *ppe
- int edma_setup(struct ppe_device *ppe_dev);
- void edma_debugfs_teardown(void);
- int edma_debugfs_setup(struct ppe_device *ppe_dev);
-+void edma_set_ethtool_ops(struct net_device *netdev);
- #endif
---- /dev/null
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
-@@ -0,0 +1,294 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/* ethtool support for EDMA */
-+
-+#include <linux/cpumask.h>
-+#include <linux/ethtool.h>
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/phylink.h>
-+
-+#include "edma.h"
-+#include "edma_port.h"
-+
-+struct edma_ethtool_stats {
-+ u8 stat_string[ETH_GSTRING_LEN];
-+ u32 stat_offset;
-+};
-+
-+/**
-+ * struct edma_gmac_stats - Per-GMAC statistics.
-+ * @rx_packets: Number of RX packets
-+ * @rx_bytes: Number of RX bytes
-+ * @rx_dropped: Number of RX dropped packets
-+ * @rx_fraglist_packets: Number of RX fraglist packets
-+ * @rx_nr_frag_packets: Number of RX nr fragment packets
-+ * @rx_nr_frag_headroom_err: Number of RX nr fragment packets with headroom error
-+ * @tx_packets: Number of TX packets
-+ * @tx_bytes: Number of TX bytes
-+ * @tx_dropped: Number of TX dropped packets
-+ * @tx_nr_frag_packets: Number of TX nr fragment packets
-+ * @tx_fraglist_packets: Number of TX fraglist packets
-+ * @tx_fraglist_with_nr_frags_packets: Number of TX fraglist packets with nr fragments
-+ * @tx_tso_packets: Number of TX TCP segmentation offload packets
-+ * @tx_tso_drop_packets: Number of TX TCP segmentation dropped packets
-+ * @tx_gso_packets: Number of TX SW GSO packets
-+ * @tx_gso_drop_packets: Number of TX SW GSO dropped packets
-+ * @tx_queue_stopped: Number of times Queue got stopped
-+ */
-+struct edma_gmac_stats {
-+ u64 rx_packets;
-+ u64 rx_bytes;
-+ u64 rx_dropped;
-+ u64 rx_fraglist_packets;
-+ u64 rx_nr_frag_packets;
-+ u64 rx_nr_frag_headroom_err;
-+ u64 tx_packets;
-+ u64 tx_bytes;
-+ u64 tx_dropped;
-+ u64 tx_nr_frag_packets;
-+ u64 tx_fraglist_packets;
-+ u64 tx_fraglist_with_nr_frags_packets;
-+ u64 tx_tso_packets;
-+ u64 tx_tso_drop_packets;
-+ u64 tx_gso_packets;
-+ u64 tx_gso_drop_packets;
-+ u64 tx_queue_stopped[EDMA_MAX_CORE];
-+};
-+
-+#define EDMA_STAT(m) offsetof(struct edma_gmac_stats, m)
-+
-+static const struct edma_ethtool_stats edma_gstrings_stats[] = {
-+ {"rx_bytes", EDMA_STAT(rx_bytes)},
-+ {"rx_packets", EDMA_STAT(rx_packets)},
-+ {"rx_dropped", EDMA_STAT(rx_dropped)},
-+ {"rx_fraglist_packets", EDMA_STAT(rx_fraglist_packets)},
-+ {"rx_nr_frag_packets", EDMA_STAT(rx_nr_frag_packets)},
-+ {"rx_nr_frag_headroom_err", EDMA_STAT(rx_nr_frag_headroom_err)},
-+ {"tx_bytes", EDMA_STAT(tx_bytes)},
-+ {"tx_packets", EDMA_STAT(tx_packets)},
-+ {"tx_dropped", EDMA_STAT(tx_dropped)},
-+ {"tx_nr_frag_packets", EDMA_STAT(tx_nr_frag_packets)},
-+ {"tx_fraglist_packets", EDMA_STAT(tx_fraglist_packets)},
-+ {"tx_fraglist_nr_frags_packets", EDMA_STAT(tx_fraglist_with_nr_frags_packets)},
-+ {"tx_tso_packets", EDMA_STAT(tx_tso_packets)},
-+ {"tx_tso_drop_packets", EDMA_STAT(tx_tso_drop_packets)},
-+ {"tx_gso_packets", EDMA_STAT(tx_gso_packets)},
-+ {"tx_gso_drop_packets", EDMA_STAT(tx_gso_drop_packets)},
-+ {"tx_queue_stopped_cpu0", EDMA_STAT(tx_queue_stopped[0])},
-+ {"tx_queue_stopped_cpu1", EDMA_STAT(tx_queue_stopped[1])},
-+ {"tx_queue_stopped_cpu2", EDMA_STAT(tx_queue_stopped[2])},
-+ {"tx_queue_stopped_cpu3", EDMA_STAT(tx_queue_stopped[3])},
-+};
-+
-+#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
-+
-+static void edma_port_get_stats(struct net_device *netdev,
-+ struct edma_gmac_stats *stats)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct edma_port_rx_stats *pcpu_rx_stats;
-+ struct edma_port_tx_stats *pcpu_tx_stats;
-+ int i;
-+
-+ memset(stats, 0, sizeof(struct edma_port_pcpu_stats));
-+
-+ for_each_possible_cpu(i) {
-+ struct edma_port_rx_stats rxp;
-+ struct edma_port_tx_stats txp;
-+ unsigned int start;
-+
-+ pcpu_rx_stats = per_cpu_ptr(port_priv->pcpu_stats.rx_stats, i);
-+
-+ do {
-+ start = u64_stats_fetch_begin(&pcpu_rx_stats->syncp);
-+ memcpy(&rxp, pcpu_rx_stats, sizeof(*pcpu_rx_stats));
-+ } while (u64_stats_fetch_retry(&pcpu_rx_stats->syncp, start));
-+
-+ stats->rx_packets += rxp.rx_pkts;
-+ stats->rx_bytes += rxp.rx_bytes;
-+ stats->rx_dropped += rxp.rx_drops;
-+ stats->rx_nr_frag_packets += rxp.rx_nr_frag_pkts;
-+ stats->rx_fraglist_packets += rxp.rx_fraglist_pkts;
-+ stats->rx_nr_frag_headroom_err += rxp.rx_nr_frag_headroom_err;
-+
-+ pcpu_tx_stats = per_cpu_ptr(port_priv->pcpu_stats.tx_stats, i);
-+
-+ do {
-+ start = u64_stats_fetch_begin(&pcpu_tx_stats->syncp);
-+ memcpy(&txp, pcpu_tx_stats, sizeof(*pcpu_tx_stats));
-+ } while (u64_stats_fetch_retry(&pcpu_tx_stats->syncp, start));
-+
-+ stats->tx_packets += txp.tx_pkts;
-+ stats->tx_bytes += txp.tx_bytes;
-+ stats->tx_dropped += txp.tx_drops;
-+ stats->tx_nr_frag_packets += txp.tx_nr_frag_pkts;
-+ stats->tx_fraglist_packets += txp.tx_fraglist_pkts;
-+ stats->tx_fraglist_with_nr_frags_packets += txp.tx_fraglist_with_nr_frags_pkts;
-+ stats->tx_tso_packets += txp.tx_tso_pkts;
-+ stats->tx_tso_drop_packets += txp.tx_tso_drop_pkts;
-+ stats->tx_gso_packets += txp.tx_gso_pkts;
-+ stats->tx_gso_drop_packets += txp.tx_gso_drop_pkts;
-+ stats->tx_queue_stopped[i] += txp.tx_queue_stopped[i];
-+ }
-+}
-+
-+static void edma_get_ethtool_stats(struct net_device *netdev,
-+ __maybe_unused struct ethtool_stats *stats,
-+ u64 *data)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct edma_gmac_stats edma_stats;
-+ u64 *mib_data;
-+ int i;
-+ u8 *p;
-+
-+ if (!port_priv)
-+ return;
-+
-+ /* Get the DMA Driver statistics from the data plane if available. */
-+ memset(&edma_stats, 0, sizeof(struct edma_gmac_stats));
-+ edma_port_get_stats(netdev, &edma_stats);
-+
-+ /* Populate data plane statistics. */
-+ for (i = 0; i < EDMA_STATS_LEN; i++) {
-+ p = ((u8 *)(&edma_stats) + edma_gstrings_stats[i].stat_offset);
-+ data[i] = *(u64 *)p;
-+ }
-+
-+ /* Get the GMAC MIB statistics along with the DMA driver statistics. */
-+ mib_data = &data[EDMA_STATS_LEN];
-+ ppe_port_get_ethtool_stats(port_priv->ppe_port, mib_data);
-+}
-+
-+static int edma_get_strset_count(struct net_device *netdev, int sset)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ int sset_count = 0;
-+
-+ if (!port_priv || sset != ETH_SS_STATS)
-+ return 0;
-+
-+ sset_count = ppe_port_get_sset_count(port_priv->ppe_port, sset);
-+
-+ return (EDMA_STATS_LEN + sset_count);
-+}
-+
-+static void edma_get_strings(struct net_device *netdev, u32 stringset,
-+ u8 *data)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ int i;
-+
-+ if (!port_priv || stringset != ETH_SS_STATS)
-+ return;
-+
-+ for (i = 0; i < EDMA_STATS_LEN; i++) {
-+ memcpy(data, edma_gstrings_stats[i].stat_string,
-+ strlen(edma_gstrings_stats[i].stat_string));
-+ data += ETH_GSTRING_LEN;
-+ }
-+
-+ ppe_port_get_strings(port_priv->ppe_port, stringset, data);
-+}
-+
-+static int edma_get_link_ksettings(struct net_device *netdev,
-+ struct ethtool_link_ksettings *cmd)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
-+}
-+
-+static int edma_set_link_ksettings(struct net_device *netdev,
-+ const struct ethtool_link_ksettings *cmd)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
-+}
-+
-+static void edma_get_pauseparam(struct net_device *netdev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+
-+ if (!port_priv)
-+ return;
-+
-+ phylink_ethtool_get_pauseparam(port->phylink, pause);
-+}
-+
-+static int edma_set_pauseparam(struct net_device *netdev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
-+}
-+
-+static int edma_get_eee(struct net_device *netdev, struct ethtool_eee *eee)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ return phylink_ethtool_get_eee(port->phylink, eee);
-+}
-+
-+static int edma_set_eee(struct net_device *netdev, struct ethtool_eee *eee)
-+{
-+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
-+ struct ppe_port *port = port_priv->ppe_port;
-+ int ret;
-+
-+ if (!port_priv)
-+ return -EINVAL;
-+
-+ ret = ppe_port_set_mac_eee(port_priv->ppe_port, eee);
-+ if (ret)
-+ return ret;
-+
-+ return phylink_ethtool_set_eee(port->phylink, eee);
-+}
-+
-+static const struct ethtool_ops edma_ethtool_ops = {
-+ .get_strings = &edma_get_strings,
-+ .get_sset_count = &edma_get_strset_count,
-+ .get_ethtool_stats = &edma_get_ethtool_stats,
-+ .get_link = ðtool_op_get_link,
-+ .get_link_ksettings = edma_get_link_ksettings,
-+ .set_link_ksettings = edma_set_link_ksettings,
-+ .get_pauseparam = &edma_get_pauseparam,
-+ .set_pauseparam = &edma_set_pauseparam,
-+ .get_eee = &edma_get_eee,
-+ .set_eee = &edma_set_eee,
-+};
-+
-+/**
-+ * edma_set_ethtool_ops - Set ethtool operations
-+ * @netdev: Netdevice
-+ *
-+ * Set ethtool operations.
-+ */
-+void edma_set_ethtool_ops(struct net_device *netdev)
-+{
-+ netdev->ethtool_ops = &edma_ethtool_ops;
-+}
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-@@ -380,6 +380,7 @@ int edma_port_setup(struct ppe_port *por
- netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- netdev->netdev_ops = &edma_port_netdev_ops;
- netdev->gso_max_segs = GSO_MAX_SEGS;
-+ edma_set_ethtool_ops(netdev);
-
- maddr = mac_addr;
- if (of_get_mac_address(np, maddr))
+++ /dev/null
-From c9ad8286ca39c2545f6a6851a8ede8488a9263f3 Mon Sep 17 00:00:00 2001
-From: Pavithra R <quic_pavir@quicinc.com>
-Date: Tue, 11 Jun 2024 00:00:46 +0530
-Subject: [PATCH 44/50] net: ethernet: qualcomm: Add module parameters for
- driver tunings
-
-Add module params and corresponding functionality for Tx/Rx
-mitigation timer/packet count, napi budget and tx requeue stop.
-
-Change-Id: I1717559c931bba4f355ee06ab89f289818400ca2
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/edma.c | 35 +++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 29 +++++++++++++--
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 21 +++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_tx.c | 29 +++++++++++++--
- .../net/ethernet/qualcomm/ppe/edma_cfg_tx.h | 16 +++++++++
- drivers/net/ethernet/qualcomm/ppe/edma_rx.h | 4 +++
- drivers/net/ethernet/qualcomm/ppe/edma_tx.h | 4 +++
- 7 files changed, 134 insertions(+), 4 deletions(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/edma.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -38,6 +38,38 @@ static int rx_buff_size;
- module_param(rx_buff_size, int, 0640);
- MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
-
-+int edma_rx_napi_budget = EDMA_RX_NAPI_WORK_DEF;
-+module_param(edma_rx_napi_budget, int, 0444);
-+MODULE_PARM_DESC(edma_rx_napi_budget, "Rx NAPI budget (default:128, min:16, max:512)");
-+
-+int edma_tx_napi_budget = EDMA_TX_NAPI_WORK_DEF;
-+module_param(edma_tx_napi_budget, int, 0444);
-+MODULE_PARM_DESC(edma_tx_napi_budget, "Tx NAPI budget (default:512 for ipq95xx, min:16, max:512)");
-+
-+int edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
-+module_param(edma_rx_mitigation_pkt_cnt, int, 0444);
-+MODULE_PARM_DESC(edma_rx_mitigation_pkt_cnt,
-+ "Rx mitigation packet count value (default:16, min:0, max: 256)");
-+
-+s32 edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
-+module_param(edma_rx_mitigation_timer, int, 0444);
-+MODULE_PARM_DESC(edma_dp_rx_mitigation_timer,
-+ "Rx mitigation timer value in microseconds (default:25, min:0, max: 1000)");
-+
-+int edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
-+module_param(edma_tx_mitigation_timer, int, 0444);
-+MODULE_PARM_DESC(edma_tx_mitigation_timer,
-+ "Tx mitigation timer value in microseconds (default:250, min:0, max: 1000)");
-+
-+int edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
-+module_param(edma_tx_mitigation_pkt_cnt, int, 0444);
-+MODULE_PARM_DESC(edma_tx_mitigation_pkt_cnt,
-+ "Tx mitigation packet count value (default:16, min:0, max: 256)");
-+
-+static int tx_requeue_stop;
-+module_param(tx_requeue_stop, int, 0640);
-+MODULE_PARM_DESC(tx_requeue_stop, "Disable Tx requeue function (default:0)");
-+
- /* Priority to multi-queue mapping. */
- static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
-@@ -828,7 +860,10 @@ int edma_setup(struct ppe_device *ppe_de
- edma_ctx->hw_info = &ipq9574_hw_info;
- edma_ctx->ppe_dev = ppe_dev;
- edma_ctx->rx_buf_size = rx_buff_size;
-+
- edma_ctx->tx_requeue_stop = false;
-+ if (tx_requeue_stop != 0)
-+ edma_ctx->tx_requeue_stop = true;
-
- /* Configure the EDMA common clocks. */
- ret = edma_clock_init();
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
-@@ -166,6 +166,24 @@ static void edma_cfg_rx_desc_ring_config
- reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
- regmap_write(regmap, reg, data);
-
-+ /* Validate mitigation timer value */
-+ if (edma_rx_mitigation_timer < EDMA_RX_MITIGATION_TIMER_MIN ||
-+ edma_rx_mitigation_timer > EDMA_RX_MITIGATION_TIMER_MAX) {
-+ pr_err("Invalid Rx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
-+ edma_rx_mitigation_timer, rxdesc_ring->ring_id,
-+ EDMA_RX_MITIGATION_TIMER_DEF);
-+ edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
-+ }
-+
-+ /* Validate mitigation packet count value */
-+ if (edma_rx_mitigation_pkt_cnt < EDMA_RX_MITIGATION_PKT_CNT_MIN ||
-+ edma_rx_mitigation_pkt_cnt > EDMA_RX_MITIGATION_PKT_CNT_MAX) {
-+ pr_err("Invalid Rx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
-+ edma_rx_mitigation_pkt_cnt, rxdesc_ring->ring_id,
-+ EDMA_RX_MITIGATION_PKT_CNT_DEF);
-+ edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
-+ }
-+
- /* Configure the Mitigation timer */
-- data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
-+ data = EDMA_MICROSEC_TO_TIMER_UNIT(edma_rx_mitigation_timer,
- ppe_dev->clk_rate / MHZ);
-@@ -176,7 +194,7 @@ static void edma_cfg_rx_desc_ring_config
- regmap_write(regmap, reg, data);
-
- /* Configure the Mitigation packet count */
-- data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
-+ data = (edma_rx_mitigation_pkt_cnt & EDMA_RXDESC_LOW_THRE_MASK)
- << EDMA_RXDESC_LOW_THRE_SHIFT;
- pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
- reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
-@@ -915,6 +933,13 @@ void edma_cfg_rx_napi_add(void)
- struct edma_ring_info *rx = hw_info->rx;
- u32 i;
-
-+ if (edma_rx_napi_budget < EDMA_RX_NAPI_WORK_MIN ||
-+ edma_rx_napi_budget > EDMA_RX_NAPI_WORK_MAX) {
-+ pr_err("Incorrect Rx NAPI budget: %d, setting to default: %d\n",
-+ edma_rx_napi_budget, hw_info->napi_budget_rx);
-+ edma_rx_napi_budget = hw_info->napi_budget_rx;
-+ }
-+
- for (i = 0; i < rx->num_rings; i++) {
- struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
-
-@@ -923,7 +948,7 @@ void edma_cfg_rx_napi_add(void)
- rxdesc_ring->napi_added = true;
- }
-
-- netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
-+ netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", edma_rx_napi_budget);
- }
-
- /**
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
-@@ -5,6 +5,15 @@
- #ifndef __EDMA_CFG_RX__
- #define __EDMA_CFG_RX__
-
-+/* Rx default NAPI budget */
-+#define EDMA_RX_NAPI_WORK_DEF 128
-+
-+/* RX minimum NAPI budget */
-+#define EDMA_RX_NAPI_WORK_MIN 16
-+
-+/* Rx maximum NAPI budget */
-+#define EDMA_RX_NAPI_WORK_MAX 512
-+
- /* SKB payload size used in page mode */
- #define EDMA_RX_PAGE_MODE_SKB_SIZE 256
-
-@@ -22,9 +31,21 @@
- /* Rx mitigation timer's default value in microseconds */
- #define EDMA_RX_MITIGATION_TIMER_DEF 25
-
-+/* Rx mitigation timer's minimum value in microseconds */
-+#define EDMA_RX_MITIGATION_TIMER_MIN 0
-+
-+/* Rx mitigation timer's maximum value in microseconds */
-+#define EDMA_RX_MITIGATION_TIMER_MAX 1000
-+
- /* Rx mitigation packet count's default value */
- #define EDMA_RX_MITIGATION_PKT_CNT_DEF 16
-
-+/* Rx mitigation packet count's minimum value */
-+#define EDMA_RX_MITIGATION_PKT_CNT_MIN 0
-+
-+/* Rx mitigation packet count's maximum value */
-+#define EDMA_RX_MITIGATION_PKT_CNT_MAX 256
-+
- /* Default bitmap of cores for RPS to ARM cores */
- #define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
-
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
-@@ -170,6 +170,24 @@ static void edma_cfg_txcmpl_ring_configu
- reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id);
- regmap_write(regmap, reg, EDMA_TXCMPL_RETMODE_OPAQUE);
-
-+ /* Validate mitigation timer value */
-+ if (edma_tx_mitigation_timer < EDMA_TX_MITIGATION_TIMER_MIN ||
-+ edma_tx_mitigation_timer > EDMA_TX_MITIGATION_TIMER_MAX) {
-+ pr_err("Invalid Tx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
-+ edma_tx_mitigation_timer, txcmpl_ring->id,
-+ EDMA_TX_MITIGATION_TIMER_DEF);
-+ edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
-+ }
-+
-+ /* Validate mitigation packet count value */
-+ if (edma_tx_mitigation_pkt_cnt < EDMA_TX_MITIGATION_PKT_CNT_MIN ||
-+ edma_tx_mitigation_pkt_cnt > EDMA_TX_MITIGATION_PKT_CNT_MAX) {
-+ pr_err("Invalid Tx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
-+ edma_tx_mitigation_pkt_cnt, txcmpl_ring->id,
-+ EDMA_TX_MITIGATION_PKT_CNT_DEF);
-+ edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
-+ }
-+
- /* Configure the Mitigation timer. */
-- data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_TX_MITIGATION_TIMER_DEF,
-+ data = EDMA_MICROSEC_TO_TIMER_UNIT(edma_tx_mitigation_timer,
- ppe_dev->clk_rate / MHZ);
-@@ -180,7 +198,7 @@ static void edma_cfg_txcmpl_ring_configu
- regmap_write(regmap, reg, data);
-
- /* Configure the Mitigation packet count. */
-- data = (EDMA_TX_MITIGATION_PKT_CNT_DEF & EDMA_TXCMPL_LOW_THRE_MASK)
-+ data = (edma_tx_mitigation_pkt_cnt & EDMA_TXCMPL_LOW_THRE_MASK)
- << EDMA_TXCMPL_LOW_THRE_SHIFT;
- pr_debug("EDMA Tx mitigation packet count value: %d\n", data);
- reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_UGT_THRE(txcmpl_ring->id);
-@@ -634,6 +652,13 @@ void edma_cfg_tx_napi_add(struct net_dev
- struct edma_txcmpl_ring *txcmpl_ring;
- u32 i, ring_idx;
-
-+ if (edma_tx_napi_budget < EDMA_TX_NAPI_WORK_MIN ||
-+ edma_tx_napi_budget > EDMA_TX_NAPI_WORK_MAX) {
-+ pr_err("Incorrect Tx NAPI budget: %d, setting to default: %d\n",
-+ edma_tx_napi_budget, hw_info->napi_budget_tx);
-+ edma_tx_napi_budget = hw_info->napi_budget_tx;
-+ }
-+
- /* Adding tx napi for a interface with each queue. */
- for_each_possible_cpu(i) {
- ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
-@@ -644,5 +669,5 @@ void edma_cfg_tx_napi_add(struct net_dev
- netdev_dbg(netdev, "Napi added for txcmpl ring: %u\n", txcmpl_ring->id);
- }
-
-- netdev_dbg(netdev, "Tx NAPI budget: %d\n", hw_info->napi_budget_tx);
-+ netdev_dbg(netdev, "Tx NAPI budget: %d\n", edma_tx_napi_budget);
- }
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
-@@ -5,12 +5,28 @@
- #ifndef __EDMA_CFG_TX__
- #define __EDMA_CFG_TX__
-
-+#define EDMA_TX_NAPI_WORK_DEF 512
-+#define EDMA_TX_NAPI_WORK_MIN 16
-+#define EDMA_TX_NAPI_WORK_MAX 512
-+
- /* Tx mitigation timer's default value. */
- #define EDMA_TX_MITIGATION_TIMER_DEF 250
-
-+/* Tx mitigation timer's minimum value in microseconds */
-+#define EDMA_TX_MITIGATION_TIMER_MIN 0
-+
-+/* Tx mitigation timer's maximum value in microseconds */
-+#define EDMA_TX_MITIGATION_TIMER_MAX 1000
-+
- /* Tx mitigation packet count default value. */
- #define EDMA_TX_MITIGATION_PKT_CNT_DEF 16
-
-+/* Tx mitigation packet count's minimum value */
-+#define EDMA_TX_MITIGATION_PKT_CNT_MIN 0
-+
-+/* Tx mitigation packet count's maximum value */
-+#define EDMA_TX_MITIGATION_PKT_CNT_MAX 256
-+
- void edma_cfg_tx_rings(void);
- int edma_cfg_tx_rings_alloc(void);
- void edma_cfg_tx_rings_cleanup(void);
---- a/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
-@@ -281,6 +281,10 @@ struct edma_rxdesc_ring {
- struct sk_buff *last;
- };
-
-+extern int edma_rx_napi_budget;
-+extern int edma_rx_mitigation_timer;
-+extern int edma_rx_mitigation_pkt_cnt;
-+
- irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
- int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
- int edma_rx_napi_poll(struct napi_struct *napi, int budget);
---- a/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
-@@ -288,6 +288,10 @@ struct edma_txcmpl_ring {
- bool napi_added;
- };
-
-+extern int edma_tx_napi_budget;
-+extern int edma_tx_mitigation_timer;
-+extern int edma_tx_mitigation_pkt_cnt;
-+
- enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
- struct sk_buff *skb,
- struct edma_txdesc_ring *txdesc_ring,
+++ /dev/null
-From a36607b554841358733167483d194ae7d3969444 Mon Sep 17 00:00:00 2001
-From: Pavithra R <quic_pavir@quicinc.com>
-Date: Tue, 11 Jun 2024 01:43:22 +0530
-Subject: [PATCH 45/50] net: ethernet: qualcomm: Add sysctl for RPS bitmap
-
-Add sysctl to configure RPS bitmap for EDMA receive.
-This bitmap is used to configure the set of ARM cores
-used to receive packets from EDMA.
-
-Change-Id: Ie0e7d5971db93ea1494608a9e79c4abb13ce69b6
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
----
- drivers/net/ethernet/qualcomm/ppe/edma.c | 23 ++++++++++++++++
- drivers/net/ethernet/qualcomm/ppe/edma.h | 2 ++
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 27 +++++++++++++++++++
- .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 4 +++
- 4 files changed, 56 insertions(+)
-
---- a/drivers/net/ethernet/qualcomm/ppe/edma.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
-@@ -797,6 +797,11 @@ void edma_destroy(struct ppe_device *ppe
- struct edma_ring_info *rx = hw_info->rx;
- u32 i;
-
-+ if (edma_ctx->rx_rps_ctl_table_hdr) {
-+ unregister_sysctl_table(edma_ctx->rx_rps_ctl_table_hdr);
-+ edma_ctx->rx_rps_ctl_table_hdr = NULL;
-+ }
-+
- /* Disable interrupts. */
- for (i = 1; i <= hw_info->max_ports; i++)
- edma_cfg_tx_disable_interrupts(i);
-@@ -840,6 +845,17 @@ void edma_destroy(struct ppe_device *ppe
- kfree(edma_ctx->netdev_arr);
- }
-
-+/* EDMA Rx RPS core sysctl table */
-+static struct ctl_table edma_rx_rps_core_table[] = {
-+ {
-+ .procname = "rps_bitmap_cores",
-+ .data = &edma_cfg_rx_rps_bitmap_cores,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = edma_cfg_rx_rps_bitmap
-+ },
-+};
-+
- /**
- * edma_setup - EDMA Setup.
- * @ppe_dev: PPE device
-@@ -865,6 +881,13 @@ int edma_setup(struct ppe_device *ppe_de
- if (tx_requeue_stop != 0)
- edma_ctx->tx_requeue_stop = true;
-
-+ edma_ctx->rx_rps_ctl_table_hdr = register_sysctl("net/edma",
-+ edma_rx_rps_core_table);
-+ if (!edma_ctx->rx_rps_ctl_table_hdr) {
-+ pr_err("Rx rps sysctl table configuration failed\n");
-+ return -EINVAL;
-+ }
-+
- /* Configure the EDMA common clocks. */
- ret = edma_clock_init();
- if (ret) {
---- a/drivers/net/ethernet/qualcomm/ppe/edma.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
-@@ -122,6 +122,7 @@ struct edma_intr_info {
- * @tx_rings: Tx Descriptor Ring, SW is producer
- * @txcmpl_rings: Tx complete Ring, SW is consumer
- * @err_stats: Per CPU error statistics
-+ * @rx_rps_ctl_table_hdr: Rx RPS sysctl table
- * @rx_page_mode: Page mode enabled or disabled
- * @rx_buf_size: Rx buffer size for Jumbo MRU
- * @tx_requeue_stop: Tx requeue stop enabled or disabled
-@@ -137,6 +138,7 @@ struct edma_context {
- struct edma_txdesc_ring *tx_rings;
- struct edma_txcmpl_ring *txcmpl_rings;
- struct edma_err_stats __percpu *err_stats;
-+ struct ctl_table_header *rx_rps_ctl_table_hdr;
- u32 rx_page_mode;
- u32 rx_buf_size;
- bool tx_requeue_stop;
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
-@@ -43,6 +43,8 @@ static u32 edma_rx_ring_queue_map[][EDMA
- { 6, 14, 22, 30 },
- { 7, 15, 23, 31 }};
-
-+u32 edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
-+
- static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
- {
- struct edma_hw_info *hw_info = edma_ctx->hw_info;
-@@ -987,3 +989,28 @@ int edma_cfg_rx_rps_hash_map(void)
-
- return 0;
- }
-+
-+/* Configure RPS hash mapping based on bitmap */
-+int edma_cfg_rx_rps_bitmap(struct ctl_table *table, int write,
-+ void *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ int ret;
-+
-+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
-+
-+ if (!write)
-+ return ret;
-+
-+ if (!edma_cfg_rx_rps_bitmap_cores ||
-+ edma_cfg_rx_rps_bitmap_cores > EDMA_RX_DEFAULT_BITMAP) {
-+ pr_warn("Incorrect CPU bitmap: %x. Setting it to default value: %d\n",
-+ edma_cfg_rx_rps_bitmap_cores, EDMA_RX_DEFAULT_BITMAP);
-+ edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
-+ }
-+
-+ ret = edma_cfg_rx_rps_hash_map();
-+
-+ pr_info("EDMA RPS bitmap value: %d\n", edma_cfg_rx_rps_bitmap_cores);
-+
-+ return ret;
-+}
---- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
-@@ -49,6 +49,8 @@
- /* Default bitmap of cores for RPS to ARM cores */
- #define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
-
-+extern u32 edma_cfg_rx_rps_bitmap_cores;
-+
- int edma_cfg_rx_rings(void);
- int edma_cfg_rx_rings_alloc(void);
- void edma_cfg_rx_ring_mappings(void);
-@@ -66,4 +68,6 @@ void edma_cfg_rx_buff_size_setup(void);
- int edma_cfg_rx_rps_hash_map(void);
- int edma_cfg_rx_rps(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-+int edma_cfg_rx_rps_bitmap(struct ctl_table *table, int write,
-+ void *buffer, size_t *lenp, loff_t *ppos);
- #endif
+++ /dev/null
-From 47f539afdab8fb99d4c047add3e1a1b1dc5a3f2d Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:15 +0530
-Subject: [PATCH 2/7] dt-bindings: clock: gcc-ipq9574: Add definition for
- GPLL0_OUT_AUX
-
-Add the definition for GPLL0_OUT_AUX clock.
-
-Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
----
- include/dt-bindings/clock/qcom,ipq9574-gcc.h | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
-+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
-@@ -220,4 +220,5 @@
- #define GCC_PCIE1_PIPE_CLK 211
- #define GCC_PCIE2_PIPE_CLK 212
- #define GCC_PCIE3_PIPE_CLK 213
-+#define GPLL0_OUT_AUX 214
- #endif
+++ /dev/null
-From ac2bd244609c4423f96406005c9cee8b6952cd20 Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:16 +0530
-Subject: [PATCH 3/7] clk: qcom: gcc-ipq9574: Add support for gpll0_out_aux
- clock
-
-Add support for gpll0_out_aux clock which acts as the parent for
-certain networking subsystem (nss) clocks.
-
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
----
- drivers/clk/qcom/gcc-ipq9574.c | 15 +++++++++++++++
- 1 file changed, 15 insertions(+)
-
---- a/drivers/clk/qcom/gcc-ipq9574.c
-+++ b/drivers/clk/qcom/gcc-ipq9574.c
-@@ -108,6 +108,20 @@ static struct clk_alpha_pll_postdiv gpll
- },
- };
-
-+static struct clk_alpha_pll_postdiv gpll0_out_aux = {
-+ .offset = 0x20000,
-+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "gpll0_out_aux",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &gpll0_main.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_alpha_pll_postdiv_ro_ops,
-+ },
-+};
-+
- static struct clk_alpha_pll gpll4_main = {
- .offset = 0x22000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
-@@ -4222,6 +4236,7 @@ static struct clk_regmap *gcc_ipq9574_cl
- [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
- [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
- [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
-+ [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
- };
-
- static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+++ /dev/null
-From 3d98604921d4b7216d3d0c8a76160dce083bd040 Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:17 +0530
-Subject: [PATCH 4/7] dt-bindings: clock: Add ipq9574 NSSCC clock and reset
- definitions
-
-Add NSSCC clock and reset definitions for ipq9574.
-
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
-Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
----
- .../bindings/clock/qcom,ipq9574-nsscc.yaml | 73 +++++++++
- .../dt-bindings/clock/qcom,ipq9574-nsscc.h | 152 ++++++++++++++++++
- .../dt-bindings/reset/qcom,ipq9574-nsscc.h | 134 +++++++++++++++
- 3 files changed, 359 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml
- create mode 100644 include/dt-bindings/clock/qcom,ipq9574-nsscc.h
- create mode 100644 include/dt-bindings/reset/qcom,ipq9574-nsscc.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml
-@@ -0,0 +1,73 @@
-+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/clock/qcom,ipq9574-nsscc.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Qualcomm Networking Sub System Clock & Reset Controller on IPQ9574
-+
-+maintainers:
-+ - Bjorn Andersson <andersson@kernel.org>
-+ - Anusha Rao <quic_anusha@quicinc.com>
-+
-+description: |
-+ Qualcomm networking sub system clock control module provides the clocks,
-+ resets and power domains on IPQ9574
-+
-+ See also::
-+ include/dt-bindings/clock/qcom,ipq9574-nsscc.h
-+ include/dt-bindings/reset/qcom,ipq9574-nsscc.h
-+
-+properties:
-+ compatible:
-+ const: qcom,ipq9574-nsscc
-+
-+ clocks:
-+ items:
-+ - description: Board XO source
-+ - description: CMN_PLL NSS 1200MHz (Bias PLL cc) clock source
-+ - description: CMN_PLL PPE 353MHz (Bias PLL ubi nc) clock source
-+ - description: GCC GPLL0 OUT AUX clock source
-+ - description: Uniphy0 NSS Rx clock source
-+ - description: Uniphy0 NSS Tx clock source
-+ - description: Uniphy1 NSS Rx clock source
-+ - description: Uniphy1 NSS Tx clock source
-+ - description: Uniphy2 NSS Rx clock source
-+ - description: Uniphy2 NSS Tx clock source
-+ - description: GCC NSSCC clock source
-+
-+ '#interconnect-cells':
-+ const: 1
-+
-+required:
-+ - compatible
-+ - clocks
-+
-+allOf:
-+ - $ref: qcom,gcc.yaml#
-+
-+unevaluatedProperties: false
-+
-+examples:
-+ - |
-+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
-+ #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
-+ clock-controller@39b00000 {
-+ compatible = "qcom,ipq9574-nsscc";
-+ reg = <0x39b00000 0x80000>;
-+ clocks = <&xo_board_clk>,
-+ <&cmn_pll NSS_1200MHZ_CLK>,
-+ <&cmn_pll PPE_353MHZ_CLK>,
-+ <&gcc GPLL0_OUT_AUX>,
-+ <&uniphy 0>,
-+ <&uniphy 1>,
-+ <&uniphy 2>,
-+ <&uniphy 3>,
-+ <&uniphy 4>,
-+ <&uniphy 5>,
-+ <&gcc GCC_NSSCC_CLK>;
-+ #clock-cells = <1>;
-+ #reset-cells = <1>;
-+ #power-domain-cells = <1>;
-+ };
-+...
---- /dev/null
-+++ b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
-@@ -0,0 +1,152 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
-+#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
-+
-+#define NSS_CC_CE_APB_CLK 0
-+#define NSS_CC_CE_AXI_CLK 1
-+#define NSS_CC_CE_CLK_SRC 2
-+#define NSS_CC_CFG_CLK_SRC 3
-+#define NSS_CC_CLC_AXI_CLK 4
-+#define NSS_CC_CLC_CLK_SRC 5
-+#define NSS_CC_CRYPTO_CLK 6
-+#define NSS_CC_CRYPTO_CLK_SRC 7
-+#define NSS_CC_CRYPTO_PPE_CLK 8
-+#define NSS_CC_HAQ_AHB_CLK 9
-+#define NSS_CC_HAQ_AXI_CLK 10
-+#define NSS_CC_HAQ_CLK_SRC 11
-+#define NSS_CC_IMEM_AHB_CLK 12
-+#define NSS_CC_IMEM_CLK_SRC 13
-+#define NSS_CC_IMEM_QSB_CLK 14
-+#define NSS_CC_INT_CFG_CLK_SRC 15
-+#define NSS_CC_NSS_CSR_CLK 16
-+#define NSS_CC_NSSNOC_CE_APB_CLK 17
-+#define NSS_CC_NSSNOC_CE_AXI_CLK 18
-+#define NSS_CC_NSSNOC_CLC_AXI_CLK 19
-+#define NSS_CC_NSSNOC_CRYPTO_CLK 20
-+#define NSS_CC_NSSNOC_HAQ_AHB_CLK 21
-+#define NSS_CC_NSSNOC_HAQ_AXI_CLK 22
-+#define NSS_CC_NSSNOC_IMEM_AHB_CLK 23
-+#define NSS_CC_NSSNOC_IMEM_QSB_CLK 24
-+#define NSS_CC_NSSNOC_NSS_CSR_CLK 25
-+#define NSS_CC_NSSNOC_PPE_CFG_CLK 26
-+#define NSS_CC_NSSNOC_PPE_CLK 27
-+#define NSS_CC_NSSNOC_UBI32_AHB0_CLK 28
-+#define NSS_CC_NSSNOC_UBI32_AXI0_CLK 29
-+#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK 30
-+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK 31
-+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK 32
-+#define NSS_CC_PORT1_MAC_CLK 33
-+#define NSS_CC_PORT1_RX_CLK 34
-+#define NSS_CC_PORT1_RX_CLK_SRC 35
-+#define NSS_CC_PORT1_RX_DIV_CLK_SRC 36
-+#define NSS_CC_PORT1_TX_CLK 37
-+#define NSS_CC_PORT1_TX_CLK_SRC 38
-+#define NSS_CC_PORT1_TX_DIV_CLK_SRC 39
-+#define NSS_CC_PORT2_MAC_CLK 40
-+#define NSS_CC_PORT2_RX_CLK 41
-+#define NSS_CC_PORT2_RX_CLK_SRC 42
-+#define NSS_CC_PORT2_RX_DIV_CLK_SRC 43
-+#define NSS_CC_PORT2_TX_CLK 44
-+#define NSS_CC_PORT2_TX_CLK_SRC 45
-+#define NSS_CC_PORT2_TX_DIV_CLK_SRC 46
-+#define NSS_CC_PORT3_MAC_CLK 47
-+#define NSS_CC_PORT3_RX_CLK 48
-+#define NSS_CC_PORT3_RX_CLK_SRC 49
-+#define NSS_CC_PORT3_RX_DIV_CLK_SRC 50
-+#define NSS_CC_PORT3_TX_CLK 51
-+#define NSS_CC_PORT3_TX_CLK_SRC 52
-+#define NSS_CC_PORT3_TX_DIV_CLK_SRC 53
-+#define NSS_CC_PORT4_MAC_CLK 54
-+#define NSS_CC_PORT4_RX_CLK 55
-+#define NSS_CC_PORT4_RX_CLK_SRC 56
-+#define NSS_CC_PORT4_RX_DIV_CLK_SRC 57
-+#define NSS_CC_PORT4_TX_CLK 58
-+#define NSS_CC_PORT4_TX_CLK_SRC 59
-+#define NSS_CC_PORT4_TX_DIV_CLK_SRC 60
-+#define NSS_CC_PORT5_MAC_CLK 61
-+#define NSS_CC_PORT5_RX_CLK 62
-+#define NSS_CC_PORT5_RX_CLK_SRC 63
-+#define NSS_CC_PORT5_RX_DIV_CLK_SRC 64
-+#define NSS_CC_PORT5_TX_CLK 65
-+#define NSS_CC_PORT5_TX_CLK_SRC 66
-+#define NSS_CC_PORT5_TX_DIV_CLK_SRC 67
-+#define NSS_CC_PORT6_MAC_CLK 68
-+#define NSS_CC_PORT6_RX_CLK 69
-+#define NSS_CC_PORT6_RX_CLK_SRC 70
-+#define NSS_CC_PORT6_RX_DIV_CLK_SRC 71
-+#define NSS_CC_PORT6_TX_CLK 72
-+#define NSS_CC_PORT6_TX_CLK_SRC 73
-+#define NSS_CC_PORT6_TX_DIV_CLK_SRC 74
-+#define NSS_CC_PPE_CLK_SRC 75
-+#define NSS_CC_PPE_EDMA_CFG_CLK 76
-+#define NSS_CC_PPE_EDMA_CLK 77
-+#define NSS_CC_PPE_SWITCH_BTQ_CLK 78
-+#define NSS_CC_PPE_SWITCH_CFG_CLK 79
-+#define NSS_CC_PPE_SWITCH_CLK 80
-+#define NSS_CC_PPE_SWITCH_IPE_CLK 81
-+#define NSS_CC_UBI0_CLK_SRC 82
-+#define NSS_CC_UBI0_DIV_CLK_SRC 83
-+#define NSS_CC_UBI1_CLK_SRC 84
-+#define NSS_CC_UBI1_DIV_CLK_SRC 85
-+#define NSS_CC_UBI2_CLK_SRC 86
-+#define NSS_CC_UBI2_DIV_CLK_SRC 87
-+#define NSS_CC_UBI32_AHB0_CLK 88
-+#define NSS_CC_UBI32_AHB1_CLK 89
-+#define NSS_CC_UBI32_AHB2_CLK 90
-+#define NSS_CC_UBI32_AHB3_CLK 91
-+#define NSS_CC_UBI32_AXI0_CLK 92
-+#define NSS_CC_UBI32_AXI1_CLK 93
-+#define NSS_CC_UBI32_AXI2_CLK 94
-+#define NSS_CC_UBI32_AXI3_CLK 95
-+#define NSS_CC_UBI32_CORE0_CLK 96
-+#define NSS_CC_UBI32_CORE1_CLK 97
-+#define NSS_CC_UBI32_CORE2_CLK 98
-+#define NSS_CC_UBI32_CORE3_CLK 99
-+#define NSS_CC_UBI32_INTR0_AHB_CLK 100
-+#define NSS_CC_UBI32_INTR1_AHB_CLK 101
-+#define NSS_CC_UBI32_INTR2_AHB_CLK 102
-+#define NSS_CC_UBI32_INTR3_AHB_CLK 103
-+#define NSS_CC_UBI32_NC_AXI0_CLK 104
-+#define NSS_CC_UBI32_NC_AXI1_CLK 105
-+#define NSS_CC_UBI32_NC_AXI2_CLK 106
-+#define NSS_CC_UBI32_NC_AXI3_CLK 107
-+#define NSS_CC_UBI32_UTCM0_CLK 108
-+#define NSS_CC_UBI32_UTCM1_CLK 109
-+#define NSS_CC_UBI32_UTCM2_CLK 110
-+#define NSS_CC_UBI32_UTCM3_CLK 111
-+#define NSS_CC_UBI3_CLK_SRC 112
-+#define NSS_CC_UBI3_DIV_CLK_SRC 113
-+#define NSS_CC_UBI_AXI_CLK_SRC 114
-+#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC 115
-+#define NSS_CC_UNIPHY_PORT1_RX_CLK 116
-+#define NSS_CC_UNIPHY_PORT1_TX_CLK 117
-+#define NSS_CC_UNIPHY_PORT2_RX_CLK 118
-+#define NSS_CC_UNIPHY_PORT2_TX_CLK 119
-+#define NSS_CC_UNIPHY_PORT3_RX_CLK 120
-+#define NSS_CC_UNIPHY_PORT3_TX_CLK 121
-+#define NSS_CC_UNIPHY_PORT4_RX_CLK 122
-+#define NSS_CC_UNIPHY_PORT4_TX_CLK 123
-+#define NSS_CC_UNIPHY_PORT5_RX_CLK 124
-+#define NSS_CC_UNIPHY_PORT5_TX_CLK 125
-+#define NSS_CC_UNIPHY_PORT6_RX_CLK 126
-+#define NSS_CC_UNIPHY_PORT6_TX_CLK 127
-+#define NSS_CC_XGMAC0_PTP_REF_CLK 128
-+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC 129
-+#define NSS_CC_XGMAC1_PTP_REF_CLK 130
-+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC 131
-+#define NSS_CC_XGMAC2_PTP_REF_CLK 132
-+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC 133
-+#define NSS_CC_XGMAC3_PTP_REF_CLK 134
-+#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC 135
-+#define NSS_CC_XGMAC4_PTP_REF_CLK 136
-+#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC 137
-+#define NSS_CC_XGMAC5_PTP_REF_CLK 138
-+#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC 139
-+#define UBI32_PLL 140
-+#define UBI32_PLL_MAIN 141
-+
-+#endif
---- /dev/null
-+++ b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
-@@ -0,0 +1,134 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
-+#define _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
-+
-+#define EDMA_HW_RESET 0
-+#define NSS_CC_CE_BCR 1
-+#define NSS_CC_CLC_BCR 2
-+#define NSS_CC_EIP197_BCR 3
-+#define NSS_CC_HAQ_BCR 4
-+#define NSS_CC_IMEM_BCR 5
-+#define NSS_CC_MAC_BCR 6
-+#define NSS_CC_PPE_BCR 7
-+#define NSS_CC_UBI_BCR 8
-+#define NSS_CC_UNIPHY_BCR 9
-+#define UBI3_CLKRST_CLAMP_ENABLE 10
-+#define UBI3_CORE_CLAMP_ENABLE 11
-+#define UBI2_CLKRST_CLAMP_ENABLE 12
-+#define UBI2_CORE_CLAMP_ENABLE 13
-+#define UBI1_CLKRST_CLAMP_ENABLE 14
-+#define UBI1_CORE_CLAMP_ENABLE 15
-+#define UBI0_CLKRST_CLAMP_ENABLE 16
-+#define UBI0_CORE_CLAMP_ENABLE 17
-+#define NSSNOC_NSS_CSR_ARES 18
-+#define NSS_CSR_ARES 19
-+#define PPE_BTQ_ARES 20
-+#define PPE_IPE_ARES 21
-+#define PPE_ARES 22
-+#define PPE_CFG_ARES 23
-+#define PPE_EDMA_ARES 24
-+#define PPE_EDMA_CFG_ARES 25
-+#define CRY_PPE_ARES 26
-+#define NSSNOC_PPE_ARES 27
-+#define NSSNOC_PPE_CFG_ARES 28
-+#define PORT1_MAC_ARES 29
-+#define PORT2_MAC_ARES 30
-+#define PORT3_MAC_ARES 31
-+#define PORT4_MAC_ARES 32
-+#define PORT5_MAC_ARES 33
-+#define PORT6_MAC_ARES 34
-+#define XGMAC0_PTP_REF_ARES 35
-+#define XGMAC1_PTP_REF_ARES 36
-+#define XGMAC2_PTP_REF_ARES 37
-+#define XGMAC3_PTP_REF_ARES 38
-+#define XGMAC4_PTP_REF_ARES 39
-+#define XGMAC5_PTP_REF_ARES 40
-+#define HAQ_AHB_ARES 41
-+#define HAQ_AXI_ARES 42
-+#define NSSNOC_HAQ_AHB_ARES 43
-+#define NSSNOC_HAQ_AXI_ARES 44
-+#define CE_APB_ARES 45
-+#define CE_AXI_ARES 46
-+#define NSSNOC_CE_APB_ARES 47
-+#define NSSNOC_CE_AXI_ARES 48
-+#define CRYPTO_ARES 49
-+#define NSSNOC_CRYPTO_ARES 50
-+#define NSSNOC_NC_AXI0_1_ARES 51
-+#define UBI0_CORE_ARES 52
-+#define UBI1_CORE_ARES 53
-+#define UBI2_CORE_ARES 54
-+#define UBI3_CORE_ARES 55
-+#define NC_AXI0_ARES 56
-+#define UTCM0_ARES 57
-+#define NC_AXI1_ARES 58
-+#define UTCM1_ARES 59
-+#define NC_AXI2_ARES 60
-+#define UTCM2_ARES 61
-+#define NC_AXI3_ARES 62
-+#define UTCM3_ARES 63
-+#define NSSNOC_NC_AXI0_ARES 64
-+#define AHB0_ARES 65
-+#define INTR0_AHB_ARES 66
-+#define AHB1_ARES 67
-+#define INTR1_AHB_ARES 68
-+#define AHB2_ARES 69
-+#define INTR2_AHB_ARES 70
-+#define AHB3_ARES 71
-+#define INTR3_AHB_ARES 72
-+#define NSSNOC_AHB0_ARES 73
-+#define NSSNOC_INT0_AHB_ARES 74
-+#define AXI0_ARES 75
-+#define AXI1_ARES 76
-+#define AXI2_ARES 77
-+#define AXI3_ARES 78
-+#define NSSNOC_AXI0_ARES 79
-+#define IMEM_QSB_ARES 80
-+#define NSSNOC_IMEM_QSB_ARES 81
-+#define IMEM_AHB_ARES 82
-+#define NSSNOC_IMEM_AHB_ARES 83
-+#define UNIPHY_PORT1_RX_ARES 84
-+#define UNIPHY_PORT1_TX_ARES 85
-+#define UNIPHY_PORT2_RX_ARES 86
-+#define UNIPHY_PORT2_TX_ARES 87
-+#define UNIPHY_PORT3_RX_ARES 88
-+#define UNIPHY_PORT3_TX_ARES 89
-+#define UNIPHY_PORT4_RX_ARES 90
-+#define UNIPHY_PORT4_TX_ARES 91
-+#define UNIPHY_PORT5_RX_ARES 92
-+#define UNIPHY_PORT5_TX_ARES 93
-+#define UNIPHY_PORT6_RX_ARES 94
-+#define UNIPHY_PORT6_TX_ARES 95
-+#define PORT1_RX_ARES 96
-+#define PORT1_TX_ARES 97
-+#define PORT2_RX_ARES 98
-+#define PORT2_TX_ARES 99
-+#define PORT3_RX_ARES 100
-+#define PORT3_TX_ARES 101
-+#define PORT4_RX_ARES 102
-+#define PORT4_TX_ARES 103
-+#define PORT5_RX_ARES 104
-+#define PORT5_TX_ARES 105
-+#define PORT6_RX_ARES 106
-+#define PORT6_TX_ARES 107
-+#define PPE_FULL_RESET 108
-+#define UNIPHY0_SOFT_RESET 109
-+#define UNIPHY1_SOFT_RESET 110
-+#define UNIPHY2_SOFT_RESET 111
-+#define UNIPHY_PORT1_ARES 112
-+#define UNIPHY_PORT2_ARES 113
-+#define UNIPHY_PORT3_ARES 114
-+#define UNIPHY_PORT4_ARES 115
-+#define UNIPHY_PORT5_ARES 116
-+#define UNIPHY_PORT6_ARES 117
-+#define NSSPORT1_RESET 118
-+#define NSSPORT2_RESET 119
-+#define NSSPORT3_RESET 120
-+#define NSSPORT4_RESET 121
-+#define NSSPORT5_RESET 122
-+#define NSSPORT6_RESET 123
-+
-+#endif
+++ /dev/null
-From 86db870d9e00b71fb3bd2c8a1a72cda971d9a77d Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:18 +0530
-Subject: [PATCH 5/7] clk: qcom: Add NSS clock Controller driver for IPQ9574
-
-Add Networking Sub System Clock Controller(NSSCC) driver for ipq9574 based
-devices.
-
-Reported-by: kernel test robot <lkp@intel.com>
-Closes: https://lore.kernel.org/oe-kbuild-all/202410101431.tjpSRNTY-lkp@intel.com/
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
----
- drivers/clk/qcom/Kconfig | 7 +
- drivers/clk/qcom/Makefile | 1 +
- drivers/clk/qcom/nsscc-ipq9574.c | 3080 ++++++++++++++++++++++++++++++
- 3 files changed, 3088 insertions(+)
- create mode 100644 drivers/clk/qcom/nsscc-ipq9574.c
-
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -215,6 +215,13 @@ config IPQ_GCC_9574
- i2c, USB, SD/eMMC, etc. Select this for the root clock
- of ipq9574.
-
-+config IPQ_NSSCC_9574
-+ tristate "IPQ9574 NSS Clock Controller"
-+ depends on ARM64 || COMPILE_TEST
-+ depends on IPQ_GCC_9574
-+ help
-+ Support for NSS clock controller on ipq9574 devices.
-+
- config IPQ_NSSCC_QCA8K
- tristate "QCA8K(QCA8386 or QCA8084) NSS Clock Controller"
- depends on MDIO_BUS || COMPILE_TEST
---- a/drivers/clk/qcom/Makefile
-+++ b/drivers/clk/qcom/Makefile
-@@ -31,6 +31,7 @@ obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq601
- obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
- obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
- obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
-+obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
- obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
- obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
- obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
---- /dev/null
-+++ b/drivers/clk/qcom/nsscc-ipq9574.c
-@@ -0,0 +1,3080 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
-+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/err.h>
-+#include <linux/interconnect-provider.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/regmap.h>
-+#include <linux/platform_device.h>
-+
-+#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
-+#include <dt-bindings/interconnect/qcom,ipq9574.h>
-+#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
-+
-+#include "clk-alpha-pll.h"
-+#include "clk-branch.h"
-+#include "clk-pll.h"
-+#include "clk-rcg.h"
-+#include "clk-regmap.h"
-+#include "clk-regmap-divider.h"
-+#include "clk-regmap-mux.h"
-+#include "common.h"
-+#include "reset.h"
-+
-+/* Need to match the order of clocks in DT binding */
-+enum {
-+ DT_XO,
-+ DT_BIAS_PLL_CC_CLK,
-+ DT_BIAS_PLL_UBI_NC_CLK,
-+ DT_GCC_GPLL0_OUT_AUX,
-+ DT_UNIPHY0_NSS_RX_CLK,
-+ DT_UNIPHY0_NSS_TX_CLK,
-+ DT_UNIPHY1_NSS_RX_CLK,
-+ DT_UNIPHY1_NSS_TX_CLK,
-+ DT_UNIPHY2_NSS_RX_CLK,
-+ DT_UNIPHY2_NSS_TX_CLK,
-+};
-+
-+enum {
-+ P_XO,
-+ P_BIAS_PLL_CC_CLK,
-+ P_BIAS_PLL_UBI_NC_CLK,
-+ P_GCC_GPLL0_OUT_AUX,
-+ P_UBI32_PLL_OUT_MAIN,
-+ P_UNIPHY0_NSS_RX_CLK,
-+ P_UNIPHY0_NSS_TX_CLK,
-+ P_UNIPHY1_NSS_RX_CLK,
-+ P_UNIPHY1_NSS_TX_CLK,
-+ P_UNIPHY2_NSS_RX_CLK,
-+ P_UNIPHY2_NSS_TX_CLK,
-+};
-+
-+static const struct alpha_pll_config ubi32_pll_config = {
-+ .l = 0x3e,
-+ .alpha = 0x6666,
-+ .config_ctl_val = 0x200d4aa8,
-+ .config_ctl_hi_val = 0x3c,
-+ .main_output_mask = BIT(0),
-+ .aux_output_mask = BIT(1),
-+ .pre_div_val = 0x0,
-+ .pre_div_mask = BIT(12),
-+ .post_div_val = 0x0,
-+ .post_div_mask = GENMASK(9, 8),
-+ .alpha_en_mask = BIT(24),
-+ .test_ctl_val = 0x1c0000c0,
-+ .test_ctl_hi_val = 0x4000,
-+};
-+
-+static struct clk_alpha_pll ubi32_pll_main = {
-+ .offset = 0x28000,
-+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
-+ .flags = SUPPORTS_DYNAMIC_UPDATE,
-+ .clkr = {
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "ubi32_pll_main",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .index = DT_XO,
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_alpha_pll_huayra_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_alpha_pll_postdiv ubi32_pll = {
-+ .offset = 0x28000,
-+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
-+ .width = 2,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "ubi32_pll",
-+ .parent_hws = (const struct clk_hw *[]) {
-+ &ubi32_pll_main.clkr.hw
-+ },
-+ .num_parents = 1,
-+ .ops = &clk_alpha_pll_postdiv_ro_ops,
-+ .flags = CLK_SET_RATE_PARENT,
-+ },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_0[] = {
-+ { P_XO, 0 },
-+ { P_BIAS_PLL_CC_CLK, 1 },
-+ { P_UNIPHY0_NSS_RX_CLK, 2 },
-+ { P_UNIPHY0_NSS_TX_CLK, 3 },
-+ { P_UNIPHY1_NSS_RX_CLK, 4 },
-+ { P_UNIPHY1_NSS_TX_CLK, 5 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_0[] = {
-+ { .index = DT_XO },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+ { .index = DT_UNIPHY0_NSS_RX_CLK },
-+ { .index = DT_UNIPHY0_NSS_TX_CLK },
-+ { .index = DT_UNIPHY1_NSS_RX_CLK },
-+ { .index = DT_UNIPHY1_NSS_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_1[] = {
-+ { P_XO, 0 },
-+ { P_BIAS_PLL_UBI_NC_CLK, 1 },
-+ { P_GCC_GPLL0_OUT_AUX, 2 },
-+ { P_BIAS_PLL_CC_CLK, 6 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_1[] = {
-+ { .index = DT_XO },
-+ { .index = DT_BIAS_PLL_UBI_NC_CLK },
-+ { .index = DT_GCC_GPLL0_OUT_AUX },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_2[] = {
-+ { P_XO, 0 },
-+ { P_UBI32_PLL_OUT_MAIN, 1 },
-+ { P_GCC_GPLL0_OUT_AUX, 2 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_2[] = {
-+ { .index = DT_XO },
-+ { .hw = &ubi32_pll.clkr.hw },
-+ { .index = DT_GCC_GPLL0_OUT_AUX },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_3[] = {
-+ { P_XO, 0 },
-+ { P_BIAS_PLL_CC_CLK, 1 },
-+ { P_GCC_GPLL0_OUT_AUX, 2 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_3[] = {
-+ { .index = DT_XO },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+ { .index = DT_GCC_GPLL0_OUT_AUX },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_4[] = {
-+ { P_XO, 0 },
-+ { P_BIAS_PLL_CC_CLK, 1 },
-+ { P_UNIPHY0_NSS_RX_CLK, 2 },
-+ { P_UNIPHY0_NSS_TX_CLK, 3 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_4[] = {
-+ { .index = DT_XO },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+ { .index = DT_UNIPHY0_NSS_RX_CLK },
-+ { .index = DT_UNIPHY0_NSS_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_5[] = {
-+ { P_XO, 0 },
-+ { P_BIAS_PLL_CC_CLK, 1 },
-+ { P_UNIPHY2_NSS_RX_CLK, 2 },
-+ { P_UNIPHY2_NSS_TX_CLK, 3 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_5[] = {
-+ { .index = DT_XO },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+ { .index = DT_UNIPHY2_NSS_RX_CLK },
-+ { .index = DT_UNIPHY2_NSS_TX_CLK },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_6[] = {
-+ { P_XO, 0 },
-+ { P_GCC_GPLL0_OUT_AUX, 2 },
-+ { P_BIAS_PLL_CC_CLK, 6 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_6[] = {
-+ { .index = DT_XO },
-+ { .index = DT_GCC_GPLL0_OUT_AUX },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+};
-+
-+static const struct parent_map nss_cc_parent_map_7[] = {
-+ { P_XO, 0 },
-+ { P_UBI32_PLL_OUT_MAIN, 1 },
-+ { P_GCC_GPLL0_OUT_AUX, 2 },
-+ { P_BIAS_PLL_CC_CLK, 6 },
-+};
-+
-+static const struct clk_parent_data nss_cc_parent_data_7[] = {
-+ { .index = DT_XO },
-+ { .hw = &ubi32_pll.clkr.hw },
-+ { .index = DT_GCC_GPLL0_OUT_AUX },
-+ { .index = DT_BIAS_PLL_CC_CLK },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
-+ F(24000000, P_XO, 1, 0, 0),
-+ F(353000000, P_BIAS_PLL_UBI_NC_CLK, 1, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_ce_clk_src = {
-+ .cmd_rcgr = 0x28404,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_1,
-+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ce_clk_src",
-+ .parent_data = nss_cc_parent_data_1,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
-+ F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_cfg_clk_src = {
-+ .cmd_rcgr = 0x28104,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_3,
-+ .freq_tbl = ftbl_nss_cc_cfg_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_cfg_clk_src",
-+ .parent_data = nss_cc_parent_data_3,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_clc_clk_src[] = {
-+ F(533333333, P_GCC_GPLL0_OUT_AUX, 1.5, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_clc_clk_src = {
-+ .cmd_rcgr = 0x28604,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_6,
-+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_clc_clk_src",
-+ .parent_data = nss_cc_parent_data_6,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_6),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_crypto_clk_src[] = {
-+ F(24000000, P_XO, 1, 0, 0),
-+ F(300000000, P_BIAS_PLL_CC_CLK, 4, 0, 0),
-+ F(600000000, P_BIAS_PLL_CC_CLK, 2, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_crypto_clk_src = {
-+ .cmd_rcgr = 0x16008,
-+ .mnd_width = 16,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_3,
-+ .freq_tbl = ftbl_nss_cc_crypto_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_crypto_clk_src",
-+ .parent_data = nss_cc_parent_data_3,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_haq_clk_src = {
-+ .cmd_rcgr = 0x28304,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_1,
-+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_haq_clk_src",
-+ .parent_data = nss_cc_parent_data_1,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_imem_clk_src = {
-+ .cmd_rcgr = 0xe008,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_1,
-+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_imem_clk_src",
-+ .parent_data = nss_cc_parent_data_1,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_int_cfg_clk_src[] = {
-+ F(200000000, P_GCC_GPLL0_OUT_AUX, 4, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_int_cfg_clk_src = {
-+ .cmd_rcgr = 0x287b4,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_3,
-+ .freq_tbl = ftbl_nss_cc_int_cfg_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_int_cfg_clk_src",
-+ .parent_data = nss_cc_parent_data_3,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
-+ C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
-+ C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
-+ FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
-+ FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
-+ { }
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
-+ C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
-+ C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
-+ FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
-+ FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
-+ { }
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
-+ C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
-+ C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_312p5[] = {
-+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
-+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port5_rx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port5_rx_clk_src_25),
-+ FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port5_rx_clk_src_125),
-+ FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
-+ FM(312500000, ftbl_nss_cc_port5_rx_clk_src_312p5),
-+ { }
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
-+ C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
-+ C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_312p5[] = {
-+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
-+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port5_tx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port5_tx_clk_src_25),
-+ FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port5_tx_clk_src_125),
-+ FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
-+ FM(312500000, ftbl_nss_cc_port5_tx_clk_src_312p5),
-+ { }
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_25[] = {
-+ C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_125[] = {
-+ C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port6_rx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port6_rx_clk_src_25),
-+ FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port6_rx_clk_src_125),
-+ FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
-+ FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
-+ { }
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_25[] = {
-+ C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
-+};
-+
-+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_125[] = {
-+ C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
-+};
-+
-+static const struct freq_multi_tbl ftbl_nss_cc_port6_tx_clk_src[] = {
-+ FMS(24000000, P_XO, 1, 0, 0),
-+ FM(25000000, ftbl_nss_cc_port6_tx_clk_src_25),
-+ FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
-+ FM(125000000, ftbl_nss_cc_port6_tx_clk_src_125),
-+ FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
-+ FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
-+ .cmd_rcgr = 0x28110,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
-+ .cmd_rcgr = 0x2811c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
-+ .cmd_rcgr = 0x28128,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
-+ .cmd_rcgr = 0x28134,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
-+ .cmd_rcgr = 0x28140,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
-+ .cmd_rcgr = 0x2814c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port4_rx_clk_src = {
-+ .cmd_rcgr = 0x28158,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port4_tx_clk_src = {
-+ .cmd_rcgr = 0x28164,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_4,
-+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_4,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port5_rx_clk_src = {
-+ .cmd_rcgr = 0x28170,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_0,
-+ .freq_multi_tbl = ftbl_nss_cc_port5_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_0,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port5_tx_clk_src = {
-+ .cmd_rcgr = 0x2817c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_0,
-+ .freq_multi_tbl = ftbl_nss_cc_port5_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_0,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port6_rx_clk_src = {
-+ .cmd_rcgr = 0x28188,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_5,
-+ .freq_multi_tbl = ftbl_nss_cc_port6_rx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_rx_clk_src",
-+ .parent_data = nss_cc_parent_data_5,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_port6_tx_clk_src = {
-+ .cmd_rcgr = 0x28194,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_5,
-+ .freq_multi_tbl = ftbl_nss_cc_port6_tx_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_tx_clk_src",
-+ .parent_data = nss_cc_parent_data_5,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
-+ .ops = &clk_rcg2_fm_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ppe_clk_src = {
-+ .cmd_rcgr = 0x28204,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_1,
-+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_clk_src",
-+ .parent_data = nss_cc_parent_data_1,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static const struct freq_tbl ftbl_nss_cc_ubi0_clk_src[] = {
-+ F(24000000, P_XO, 1, 0, 0),
-+ F(187200000, P_UBI32_PLL_OUT_MAIN, 8, 0, 0),
-+ F(748800000, P_UBI32_PLL_OUT_MAIN, 2, 0, 0),
-+ F(1497600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
-+ F(1689600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
-+ { }
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi0_clk_src = {
-+ .cmd_rcgr = 0x28704,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_2,
-+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi0_clk_src",
-+ .parent_data = nss_cc_parent_data_2,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi1_clk_src = {
-+ .cmd_rcgr = 0x2870c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_2,
-+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi1_clk_src",
-+ .parent_data = nss_cc_parent_data_2,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi2_clk_src = {
-+ .cmd_rcgr = 0x28714,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_2,
-+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi2_clk_src",
-+ .parent_data = nss_cc_parent_data_2,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi3_clk_src = {
-+ .cmd_rcgr = 0x2871c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_2,
-+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi3_clk_src",
-+ .parent_data = nss_cc_parent_data_2,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi_axi_clk_src = {
-+ .cmd_rcgr = 0x28724,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_7,
-+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi_axi_clk_src",
-+ .parent_data = nss_cc_parent_data_7,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_7),
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_rcg2 nss_cc_ubi_nc_axi_bfdcd_clk_src = {
-+ .cmd_rcgr = 0x2872c,
-+ .mnd_width = 0,
-+ .hid_width = 5,
-+ .parent_map = nss_cc_parent_map_1,
-+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi_nc_axi_bfdcd_clk_src",
-+ .parent_data = nss_cc_parent_data_1,
-+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_rcg2_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
-+ .reg = 0x28118,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
-+ .reg = 0x28124,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
-+ .reg = 0x28130,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
-+ .reg = 0x2813c,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
-+ .reg = 0x28148,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
-+ .reg = 0x28154,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port4_rx_div_clk_src = {
-+ .reg = 0x28160,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port4_tx_div_clk_src = {
-+ .reg = 0x2816c,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port5_rx_div_clk_src = {
-+ .reg = 0x28178,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port5_tx_div_clk_src = {
-+ .reg = 0x28184,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port6_rx_div_clk_src = {
-+ .reg = 0x28190,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_rx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_rx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_port6_tx_div_clk_src = {
-+ .reg = 0x2819c,
-+ .shift = 0,
-+ .width = 9,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_tx_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_tx_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_ubi0_div_clk_src = {
-+ .reg = 0x287a4,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi0_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi0_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_ubi1_div_clk_src = {
-+ .reg = 0x287a8,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi1_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi1_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_ubi2_div_clk_src = {
-+ .reg = 0x287ac,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi2_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi2_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_ubi3_div_clk_src = {
-+ .reg = 0x287b0,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi3_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi3_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
-+ .reg = 0x28214,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
-+ .reg = 0x28218,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
-+ .reg = 0x2821c,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac3_ptp_ref_div_clk_src = {
-+ .reg = 0x28220,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac3_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac4_ptp_ref_div_clk_src = {
-+ .reg = 0x28224,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac4_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_regmap_div nss_cc_xgmac5_ptp_ref_div_clk_src = {
-+ .reg = 0x28228,
-+ .shift = 0,
-+ .width = 4,
-+ .clkr.hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac5_ptp_ref_div_clk_src",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_regmap_div_ro_ops,
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ce_apb_clk = {
-+ .halt_reg = 0x2840c,
-+ .clkr = {
-+ .enable_reg = 0x2840c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ce_apb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ce_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ce_axi_clk = {
-+ .halt_reg = 0x28410,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28410,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ce_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ce_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_clc_axi_clk = {
-+ .halt_reg = 0x2860c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2860c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_clc_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_clc_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_crypto_clk = {
-+ .halt_reg = 0x1601c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x1601c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_crypto_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_crypto_ppe_clk = {
-+ .halt_reg = 0x28240,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28240,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_crypto_ppe_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_haq_ahb_clk = {
-+ .halt_reg = 0x2830c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2830c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_haq_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_haq_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_haq_axi_clk = {
-+ .halt_reg = 0x28310,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28310,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_haq_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_haq_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_imem_ahb_clk = {
-+ .halt_reg = 0xe018,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xe018,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_imem_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_imem_qsb_clk = {
-+ .halt_reg = 0xe010,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xe010,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_imem_qsb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_imem_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nss_csr_clk = {
-+ .halt_reg = 0x281d0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281d0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nss_csr_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
-+ .halt_reg = 0x28414,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28414,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ce_apb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ce_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
-+ .halt_reg = 0x28418,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28418,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ce_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ce_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_clc_axi_clk = {
-+ .halt_reg = 0x28610,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28610,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_clc_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_clc_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_crypto_clk = {
-+ .halt_reg = 0x16020,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x16020,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_crypto_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_haq_ahb_clk = {
-+ .halt_reg = 0x28314,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28314,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_haq_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_haq_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_haq_axi_clk = {
-+ .halt_reg = 0x28318,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28318,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_haq_axi_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_haq_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_imem_ahb_clk = {
-+ .halt_reg = 0xe01c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xe01c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_imem_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_imem_qsb_clk = {
-+ .halt_reg = 0xe014,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0xe014,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_imem_qsb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_imem_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
-+ .halt_reg = 0x281d4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281d4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_nss_csr_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
-+ .halt_reg = 0x28248,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28248,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ppe_cfg_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
-+ .halt_reg = 0x28244,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28244,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ppe_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ubi32_ahb0_clk = {
-+ .halt_reg = 0x28788,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28788,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ubi32_ahb0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ubi32_axi0_clk = {
-+ .halt_reg = 0x287a0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x287a0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ubi32_axi0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ubi32_int0_ahb_clk = {
-+ .halt_reg = 0x2878c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2878c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ubi32_int0_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_1_clk = {
-+ .halt_reg = 0x287bc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x287bc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_clk = {
-+ .halt_reg = 0x28764,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28764,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port1_mac_clk = {
-+ .halt_reg = 0x2824c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2824c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port1_rx_clk = {
-+ .halt_reg = 0x281a0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281a0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port1_tx_clk = {
-+ .halt_reg = 0x281a4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281a4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port1_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port2_mac_clk = {
-+ .halt_reg = 0x28250,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28250,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port2_rx_clk = {
-+ .halt_reg = 0x281a8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281a8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port2_tx_clk = {
-+ .halt_reg = 0x281ac,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281ac,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port2_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port3_mac_clk = {
-+ .halt_reg = 0x28254,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28254,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port3_rx_clk = {
-+ .halt_reg = 0x281b0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281b0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port3_tx_clk = {
-+ .halt_reg = 0x281b4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281b4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port3_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port4_mac_clk = {
-+ .halt_reg = 0x28258,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28258,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port4_rx_clk = {
-+ .halt_reg = 0x281b8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281b8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port4_tx_clk = {
-+ .halt_reg = 0x281bc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281bc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port4_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port5_mac_clk = {
-+ .halt_reg = 0x2825c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2825c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port5_rx_clk = {
-+ .halt_reg = 0x281c0,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281c0,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port5_tx_clk = {
-+ .halt_reg = 0x281c4,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281c4,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port5_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port6_mac_clk = {
-+ .halt_reg = 0x28260,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28260,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_mac_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port6_rx_clk = {
-+ .halt_reg = 0x281c8,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281c8,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_port6_tx_clk = {
-+ .halt_reg = 0x281cc,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x281cc,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_port6_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
-+ .halt_reg = 0x2823c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2823c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_edma_cfg_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_edma_clk = {
-+ .halt_reg = 0x28238,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28238,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_edma_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
-+ .halt_reg = 0x2827c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2827c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_switch_btq_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
-+ .halt_reg = 0x28234,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28234,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_switch_cfg_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_switch_clk = {
-+ .halt_reg = 0x28230,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28230,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_switch_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
-+ .halt_reg = 0x2822c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2822c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ppe_switch_ipe_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_ahb0_clk = {
-+ .halt_reg = 0x28768,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28768,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_ahb0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_ahb1_clk = {
-+ .halt_reg = 0x28770,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28770,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_ahb1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_ahb2_clk = {
-+ .halt_reg = 0x28778,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28778,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_ahb2_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_ahb3_clk = {
-+ .halt_reg = 0x28780,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28780,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_ahb3_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_axi0_clk = {
-+ .halt_reg = 0x28790,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28790,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_axi0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_axi1_clk = {
-+ .halt_reg = 0x28794,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28794,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_axi1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_axi2_clk = {
-+ .halt_reg = 0x28798,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28798,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_axi2_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_axi3_clk = {
-+ .halt_reg = 0x2879c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2879c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_axi3_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_core0_clk = {
-+ .halt_reg = 0x28734,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28734,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_core0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi0_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_core1_clk = {
-+ .halt_reg = 0x28738,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28738,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_core1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi1_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_core2_clk = {
-+ .halt_reg = 0x2873c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2873c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_core2_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi2_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_core3_clk = {
-+ .halt_reg = 0x28740,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28740,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_core3_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi3_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_intr0_ahb_clk = {
-+ .halt_reg = 0x2876c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2876c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_intr0_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_intr1_ahb_clk = {
-+ .halt_reg = 0x28774,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28774,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_intr1_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_intr2_ahb_clk = {
-+ .halt_reg = 0x2877c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2877c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_intr2_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_intr3_ahb_clk = {
-+ .halt_reg = 0x28784,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28784,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_intr3_ahb_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_nc_axi0_clk = {
-+ .halt_reg = 0x28744,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28744,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_nc_axi0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_nc_axi1_clk = {
-+ .halt_reg = 0x2874c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2874c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_nc_axi1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_nc_axi2_clk = {
-+ .halt_reg = 0x28754,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28754,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_nc_axi2_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_nc_axi3_clk = {
-+ .halt_reg = 0x2875c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2875c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_nc_axi3_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_utcm0_clk = {
-+ .halt_reg = 0x28748,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28748,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_utcm0_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_utcm1_clk = {
-+ .halt_reg = 0x28750,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28750,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_utcm1_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_utcm2_clk = {
-+ .halt_reg = 0x28758,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28758,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_utcm2_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_ubi32_utcm3_clk = {
-+ .halt_reg = 0x28760,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28760,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_ubi32_utcm3_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
-+ .halt_reg = 0x28904,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28904,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port1_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
-+ .halt_reg = 0x28908,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28908,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port1_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
-+ .halt_reg = 0x2890c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2890c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port2_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
-+ .halt_reg = 0x28910,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28910,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port2_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
-+ .halt_reg = 0x28914,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28914,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port3_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
-+ .halt_reg = 0x28918,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28918,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port3_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port4_rx_clk = {
-+ .halt_reg = 0x2891c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2891c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port4_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port4_tx_clk = {
-+ .halt_reg = 0x28920,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28920,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port4_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port5_rx_clk = {
-+ .halt_reg = 0x28924,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28924,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port5_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port5_tx_clk = {
-+ .halt_reg = 0x28928,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28928,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port5_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port6_rx_clk = {
-+ .halt_reg = 0x2892c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2892c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port6_rx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_uniphy_port6_tx_clk = {
-+ .halt_reg = 0x28930,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28930,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_uniphy_port6_tx_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
-+ .halt_reg = 0x28264,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28264,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac0_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
-+ .halt_reg = 0x28268,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28268,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac1_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
-+ .halt_reg = 0x2826c,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x2826c,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac2_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac3_ptp_ref_clk = {
-+ .halt_reg = 0x28270,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28270,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac3_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac4_ptp_ref_clk = {
-+ .halt_reg = 0x28274,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28274,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac4_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_branch nss_cc_xgmac5_ptp_ref_clk = {
-+ .halt_reg = 0x28278,
-+ .halt_check = BRANCH_HALT,
-+ .clkr = {
-+ .enable_reg = 0x28278,
-+ .enable_mask = BIT(0),
-+ .hw.init = &(const struct clk_init_data) {
-+ .name = "nss_cc_xgmac5_ptp_ref_clk",
-+ .parent_data = &(const struct clk_parent_data) {
-+ .hw = &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr.hw,
-+ },
-+ .num_parents = 1,
-+ .flags = CLK_SET_RATE_PARENT,
-+ .ops = &clk_branch2_ops,
-+ },
-+ },
-+};
-+
-+static struct clk_regmap *nss_cc_ipq9574_clocks[] = {
-+ [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
-+ [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
-+ [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
-+ [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
-+ [NSS_CC_CLC_AXI_CLK] = &nss_cc_clc_axi_clk.clkr,
-+ [NSS_CC_CLC_CLK_SRC] = &nss_cc_clc_clk_src.clkr,
-+ [NSS_CC_CRYPTO_CLK] = &nss_cc_crypto_clk.clkr,
-+ [NSS_CC_CRYPTO_CLK_SRC] = &nss_cc_crypto_clk_src.clkr,
-+ [NSS_CC_CRYPTO_PPE_CLK] = &nss_cc_crypto_ppe_clk.clkr,
-+ [NSS_CC_HAQ_AHB_CLK] = &nss_cc_haq_ahb_clk.clkr,
-+ [NSS_CC_HAQ_AXI_CLK] = &nss_cc_haq_axi_clk.clkr,
-+ [NSS_CC_HAQ_CLK_SRC] = &nss_cc_haq_clk_src.clkr,
-+ [NSS_CC_IMEM_AHB_CLK] = &nss_cc_imem_ahb_clk.clkr,
-+ [NSS_CC_IMEM_CLK_SRC] = &nss_cc_imem_clk_src.clkr,
-+ [NSS_CC_IMEM_QSB_CLK] = &nss_cc_imem_qsb_clk.clkr,
-+ [NSS_CC_INT_CFG_CLK_SRC] = &nss_cc_int_cfg_clk_src.clkr,
-+ [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
-+ [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
-+ [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
-+ [NSS_CC_NSSNOC_CLC_AXI_CLK] = &nss_cc_nssnoc_clc_axi_clk.clkr,
-+ [NSS_CC_NSSNOC_CRYPTO_CLK] = &nss_cc_nssnoc_crypto_clk.clkr,
-+ [NSS_CC_NSSNOC_HAQ_AHB_CLK] = &nss_cc_nssnoc_haq_ahb_clk.clkr,
-+ [NSS_CC_NSSNOC_HAQ_AXI_CLK] = &nss_cc_nssnoc_haq_axi_clk.clkr,
-+ [NSS_CC_NSSNOC_IMEM_AHB_CLK] = &nss_cc_nssnoc_imem_ahb_clk.clkr,
-+ [NSS_CC_NSSNOC_IMEM_QSB_CLK] = &nss_cc_nssnoc_imem_qsb_clk.clkr,
-+ [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
-+ [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
-+ [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
-+ [NSS_CC_NSSNOC_UBI32_AHB0_CLK] = &nss_cc_nssnoc_ubi32_ahb0_clk.clkr,
-+ [NSS_CC_NSSNOC_UBI32_AXI0_CLK] = &nss_cc_nssnoc_ubi32_axi0_clk.clkr,
-+ [NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK] =
-+ &nss_cc_nssnoc_ubi32_int0_ahb_clk.clkr,
-+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK] =
-+ &nss_cc_nssnoc_ubi32_nc_axi0_1_clk.clkr,
-+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK] =
-+ &nss_cc_nssnoc_ubi32_nc_axi0_clk.clkr,
-+ [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
-+ [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
-+ [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
-+ [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
-+ [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
-+ [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
-+ [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
-+ [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
-+ [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
-+ [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
-+ [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
-+ [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
-+ [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
-+ [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
-+ [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
-+ [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
-+ [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
-+ [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
-+ [NSS_CC_PORT4_MAC_CLK] = &nss_cc_port4_mac_clk.clkr,
-+ [NSS_CC_PORT4_RX_CLK] = &nss_cc_port4_rx_clk.clkr,
-+ [NSS_CC_PORT4_RX_CLK_SRC] = &nss_cc_port4_rx_clk_src.clkr,
-+ [NSS_CC_PORT4_RX_DIV_CLK_SRC] = &nss_cc_port4_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT4_TX_CLK] = &nss_cc_port4_tx_clk.clkr,
-+ [NSS_CC_PORT4_TX_CLK_SRC] = &nss_cc_port4_tx_clk_src.clkr,
-+ [NSS_CC_PORT4_TX_DIV_CLK_SRC] = &nss_cc_port4_tx_div_clk_src.clkr,
-+ [NSS_CC_PORT5_MAC_CLK] = &nss_cc_port5_mac_clk.clkr,
-+ [NSS_CC_PORT5_RX_CLK] = &nss_cc_port5_rx_clk.clkr,
-+ [NSS_CC_PORT5_RX_CLK_SRC] = &nss_cc_port5_rx_clk_src.clkr,
-+ [NSS_CC_PORT5_RX_DIV_CLK_SRC] = &nss_cc_port5_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT5_TX_CLK] = &nss_cc_port5_tx_clk.clkr,
-+ [NSS_CC_PORT5_TX_CLK_SRC] = &nss_cc_port5_tx_clk_src.clkr,
-+ [NSS_CC_PORT5_TX_DIV_CLK_SRC] = &nss_cc_port5_tx_div_clk_src.clkr,
-+ [NSS_CC_PORT6_MAC_CLK] = &nss_cc_port6_mac_clk.clkr,
-+ [NSS_CC_PORT6_RX_CLK] = &nss_cc_port6_rx_clk.clkr,
-+ [NSS_CC_PORT6_RX_CLK_SRC] = &nss_cc_port6_rx_clk_src.clkr,
-+ [NSS_CC_PORT6_RX_DIV_CLK_SRC] = &nss_cc_port6_rx_div_clk_src.clkr,
-+ [NSS_CC_PORT6_TX_CLK] = &nss_cc_port6_tx_clk.clkr,
-+ [NSS_CC_PORT6_TX_CLK_SRC] = &nss_cc_port6_tx_clk_src.clkr,
-+ [NSS_CC_PORT6_TX_DIV_CLK_SRC] = &nss_cc_port6_tx_div_clk_src.clkr,
-+ [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
-+ [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
-+ [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
-+ [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
-+ [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
-+ [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
-+ [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
-+ [NSS_CC_UBI0_CLK_SRC] = &nss_cc_ubi0_clk_src.clkr,
-+ [NSS_CC_UBI0_DIV_CLK_SRC] = &nss_cc_ubi0_div_clk_src.clkr,
-+ [NSS_CC_UBI1_CLK_SRC] = &nss_cc_ubi1_clk_src.clkr,
-+ [NSS_CC_UBI1_DIV_CLK_SRC] = &nss_cc_ubi1_div_clk_src.clkr,
-+ [NSS_CC_UBI2_CLK_SRC] = &nss_cc_ubi2_clk_src.clkr,
-+ [NSS_CC_UBI2_DIV_CLK_SRC] = &nss_cc_ubi2_div_clk_src.clkr,
-+ [NSS_CC_UBI32_AHB0_CLK] = &nss_cc_ubi32_ahb0_clk.clkr,
-+ [NSS_CC_UBI32_AHB1_CLK] = &nss_cc_ubi32_ahb1_clk.clkr,
-+ [NSS_CC_UBI32_AHB2_CLK] = &nss_cc_ubi32_ahb2_clk.clkr,
-+ [NSS_CC_UBI32_AHB3_CLK] = &nss_cc_ubi32_ahb3_clk.clkr,
-+ [NSS_CC_UBI32_AXI0_CLK] = &nss_cc_ubi32_axi0_clk.clkr,
-+ [NSS_CC_UBI32_AXI1_CLK] = &nss_cc_ubi32_axi1_clk.clkr,
-+ [NSS_CC_UBI32_AXI2_CLK] = &nss_cc_ubi32_axi2_clk.clkr,
-+ [NSS_CC_UBI32_AXI3_CLK] = &nss_cc_ubi32_axi3_clk.clkr,
-+ [NSS_CC_UBI32_CORE0_CLK] = &nss_cc_ubi32_core0_clk.clkr,
-+ [NSS_CC_UBI32_CORE1_CLK] = &nss_cc_ubi32_core1_clk.clkr,
-+ [NSS_CC_UBI32_CORE2_CLK] = &nss_cc_ubi32_core2_clk.clkr,
-+ [NSS_CC_UBI32_CORE3_CLK] = &nss_cc_ubi32_core3_clk.clkr,
-+ [NSS_CC_UBI32_INTR0_AHB_CLK] = &nss_cc_ubi32_intr0_ahb_clk.clkr,
-+ [NSS_CC_UBI32_INTR1_AHB_CLK] = &nss_cc_ubi32_intr1_ahb_clk.clkr,
-+ [NSS_CC_UBI32_INTR2_AHB_CLK] = &nss_cc_ubi32_intr2_ahb_clk.clkr,
-+ [NSS_CC_UBI32_INTR3_AHB_CLK] = &nss_cc_ubi32_intr3_ahb_clk.clkr,
-+ [NSS_CC_UBI32_NC_AXI0_CLK] = &nss_cc_ubi32_nc_axi0_clk.clkr,
-+ [NSS_CC_UBI32_NC_AXI1_CLK] = &nss_cc_ubi32_nc_axi1_clk.clkr,
-+ [NSS_CC_UBI32_NC_AXI2_CLK] = &nss_cc_ubi32_nc_axi2_clk.clkr,
-+ [NSS_CC_UBI32_NC_AXI3_CLK] = &nss_cc_ubi32_nc_axi3_clk.clkr,
-+ [NSS_CC_UBI32_UTCM0_CLK] = &nss_cc_ubi32_utcm0_clk.clkr,
-+ [NSS_CC_UBI32_UTCM1_CLK] = &nss_cc_ubi32_utcm1_clk.clkr,
-+ [NSS_CC_UBI32_UTCM2_CLK] = &nss_cc_ubi32_utcm2_clk.clkr,
-+ [NSS_CC_UBI32_UTCM3_CLK] = &nss_cc_ubi32_utcm3_clk.clkr,
-+ [NSS_CC_UBI3_CLK_SRC] = &nss_cc_ubi3_clk_src.clkr,
-+ [NSS_CC_UBI3_DIV_CLK_SRC] = &nss_cc_ubi3_div_clk_src.clkr,
-+ [NSS_CC_UBI_AXI_CLK_SRC] = &nss_cc_ubi_axi_clk_src.clkr,
-+ [NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC] =
-+ &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr,
-+ [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT4_RX_CLK] = &nss_cc_uniphy_port4_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT4_TX_CLK] = &nss_cc_uniphy_port4_tx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT5_RX_CLK] = &nss_cc_uniphy_port5_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT5_TX_CLK] = &nss_cc_uniphy_port5_tx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT6_RX_CLK] = &nss_cc_uniphy_port6_rx_clk.clkr,
-+ [NSS_CC_UNIPHY_PORT6_TX_CLK] = &nss_cc_uniphy_port6_tx_clk.clkr,
-+ [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
-+ [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
-+ [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
-+ [NSS_CC_XGMAC3_PTP_REF_CLK] = &nss_cc_xgmac3_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr,
-+ [NSS_CC_XGMAC4_PTP_REF_CLK] = &nss_cc_xgmac4_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr,
-+ [NSS_CC_XGMAC5_PTP_REF_CLK] = &nss_cc_xgmac5_ptp_ref_clk.clkr,
-+ [NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC] =
-+ &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr,
-+ [UBI32_PLL] = &ubi32_pll.clkr,
-+ [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
-+};
-+
-+static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
-+ [NSS_CC_CE_BCR] = { 0x28400, 0 },
-+ [NSS_CC_CLC_BCR] = { 0x28600, 0 },
-+ [NSS_CC_EIP197_BCR] = { 0x16004, 0 },
-+ [NSS_CC_HAQ_BCR] = { 0x28300, 0 },
-+ [NSS_CC_IMEM_BCR] = { 0xe004, 0 },
-+ [NSS_CC_MAC_BCR] = { 0x28100, 0 },
-+ [NSS_CC_PPE_BCR] = { 0x28200, 0 },
-+ [NSS_CC_UBI_BCR] = { 0x28700, 0 },
-+ [NSS_CC_UNIPHY_BCR] = { 0x28900, 0 },
-+ [UBI3_CLKRST_CLAMP_ENABLE] = { 0x28a04, 9 },
-+ [UBI3_CORE_CLAMP_ENABLE] = { 0x28a04, 8 },
-+ [UBI2_CLKRST_CLAMP_ENABLE] = { 0x28a04, 7 },
-+ [UBI2_CORE_CLAMP_ENABLE] = { 0x28a04, 6 },
-+ [UBI1_CLKRST_CLAMP_ENABLE] = { 0x28a04, 5 },
-+ [UBI1_CORE_CLAMP_ENABLE] = { 0x28a04, 4 },
-+ [UBI0_CLKRST_CLAMP_ENABLE] = { 0x28a04, 3 },
-+ [UBI0_CORE_CLAMP_ENABLE] = { 0x28a04, 2 },
-+ [NSSNOC_NSS_CSR_ARES] = { 0x28a04, 1 },
-+ [NSS_CSR_ARES] = { 0x28a04, 0 },
-+ [PPE_BTQ_ARES] = { 0x28a08, 20 },
-+ [PPE_IPE_ARES] = { 0x28a08, 19 },
-+ [PPE_ARES] = { 0x28a08, 18 },
-+ [PPE_CFG_ARES] = { 0x28a08, 17 },
-+ [PPE_EDMA_ARES] = { 0x28a08, 16 },
-+ [PPE_EDMA_CFG_ARES] = { 0x28a08, 15 },
-+ [CRY_PPE_ARES] = { 0x28a08, 14 },
-+ [NSSNOC_PPE_ARES] = { 0x28a08, 13 },
-+ [NSSNOC_PPE_CFG_ARES] = { 0x28a08, 12 },
-+ [PORT1_MAC_ARES] = { 0x28a08, 11 },
-+ [PORT2_MAC_ARES] = { 0x28a08, 10 },
-+ [PORT3_MAC_ARES] = { 0x28a08, 9 },
-+ [PORT4_MAC_ARES] = { 0x28a08, 8 },
-+ [PORT5_MAC_ARES] = { 0x28a08, 7 },
-+ [PORT6_MAC_ARES] = { 0x28a08, 6 },
-+ [XGMAC0_PTP_REF_ARES] = { 0x28a08, 5 },
-+ [XGMAC1_PTP_REF_ARES] = { 0x28a08, 4 },
-+ [XGMAC2_PTP_REF_ARES] = { 0x28a08, 3 },
-+ [XGMAC3_PTP_REF_ARES] = { 0x28a08, 2 },
-+ [XGMAC4_PTP_REF_ARES] = { 0x28a08, 1 },
-+ [XGMAC5_PTP_REF_ARES] = { 0x28a08, 0 },
-+ [HAQ_AHB_ARES] = { 0x28a0c, 3 },
-+ [HAQ_AXI_ARES] = { 0x28a0c, 2 },
-+ [NSSNOC_HAQ_AHB_ARES] = { 0x28a0c, 1 },
-+ [NSSNOC_HAQ_AXI_ARES] = { 0x28a0c, 0 },
-+ [CE_APB_ARES] = { 0x28a10, 3 },
-+ [CE_AXI_ARES] = { 0x28a10, 2 },
-+ [NSSNOC_CE_APB_ARES] = { 0x28a10, 1 },
-+ [NSSNOC_CE_AXI_ARES] = { 0x28a10, 0 },
-+ [CRYPTO_ARES] = { 0x28a14, 1 },
-+ [NSSNOC_CRYPTO_ARES] = { 0x28a14, 0 },
-+ [NSSNOC_NC_AXI0_1_ARES] = { 0x28a1c, 28 },
-+ [UBI0_CORE_ARES] = { 0x28a1c, 27 },
-+ [UBI1_CORE_ARES] = { 0x28a1c, 26 },
-+ [UBI2_CORE_ARES] = { 0x28a1c, 25 },
-+ [UBI3_CORE_ARES] = { 0x28a1c, 24 },
-+ [NC_AXI0_ARES] = { 0x28a1c, 23 },
-+ [UTCM0_ARES] = { 0x28a1c, 22 },
-+ [NC_AXI1_ARES] = { 0x28a1c, 21 },
-+ [UTCM1_ARES] = { 0x28a1c, 20 },
-+ [NC_AXI2_ARES] = { 0x28a1c, 19 },
-+ [UTCM2_ARES] = { 0x28a1c, 18 },
-+ [NC_AXI3_ARES] = { 0x28a1c, 17 },
-+ [UTCM3_ARES] = { 0x28a1c, 16 },
-+ [NSSNOC_NC_AXI0_ARES] = { 0x28a1c, 15 },
-+ [AHB0_ARES] = { 0x28a1c, 14 },
-+ [INTR0_AHB_ARES] = { 0x28a1c, 13 },
-+ [AHB1_ARES] = { 0x28a1c, 12 },
-+ [INTR1_AHB_ARES] = { 0x28a1c, 11 },
-+ [AHB2_ARES] = { 0x28a1c, 10 },
-+ [INTR2_AHB_ARES] = { 0x28a1c, 9 },
-+ [AHB3_ARES] = { 0x28a1c, 8 },
-+ [INTR3_AHB_ARES] = { 0x28a1c, 7 },
-+ [NSSNOC_AHB0_ARES] = { 0x28a1c, 6 },
-+ [NSSNOC_INT0_AHB_ARES] = { 0x28a1c, 5 },
-+ [AXI0_ARES] = { 0x28a1c, 4 },
-+ [AXI1_ARES] = { 0x28a1c, 3 },
-+ [AXI2_ARES] = { 0x28a1c, 2 },
-+ [AXI3_ARES] = { 0x28a1c, 1 },
-+ [NSSNOC_AXI0_ARES] = { 0x28a1c, 0 },
-+ [IMEM_QSB_ARES] = { 0x28a20, 3 },
-+ [NSSNOC_IMEM_QSB_ARES] = { 0x28a20, 2 },
-+ [IMEM_AHB_ARES] = { 0x28a20, 1 },
-+ [NSSNOC_IMEM_AHB_ARES] = { 0x28a20, 0 },
-+ [UNIPHY_PORT1_RX_ARES] = { 0x28a24, 23 },
-+ [UNIPHY_PORT1_TX_ARES] = { 0x28a24, 22 },
-+ [UNIPHY_PORT2_RX_ARES] = { 0x28a24, 21 },
-+ [UNIPHY_PORT2_TX_ARES] = { 0x28a24, 20 },
-+ [UNIPHY_PORT3_RX_ARES] = { 0x28a24, 19 },
-+ [UNIPHY_PORT3_TX_ARES] = { 0x28a24, 18 },
-+ [UNIPHY_PORT4_RX_ARES] = { 0x28a24, 17 },
-+ [UNIPHY_PORT4_TX_ARES] = { 0x28a24, 16 },
-+ [UNIPHY_PORT5_RX_ARES] = { 0x28a24, 15 },
-+ [UNIPHY_PORT5_TX_ARES] = { 0x28a24, 14 },
-+ [UNIPHY_PORT6_RX_ARES] = { 0x28a24, 13 },
-+ [UNIPHY_PORT6_TX_ARES] = { 0x28a24, 12 },
-+ [PORT1_RX_ARES] = { 0x28a24, 11 },
-+ [PORT1_TX_ARES] = { 0x28a24, 10 },
-+ [PORT2_RX_ARES] = { 0x28a24, 9 },
-+ [PORT2_TX_ARES] = { 0x28a24, 8 },
-+ [PORT3_RX_ARES] = { 0x28a24, 7 },
-+ [PORT3_TX_ARES] = { 0x28a24, 6 },
-+ [PORT4_RX_ARES] = { 0x28a24, 5 },
-+ [PORT4_TX_ARES] = { 0x28a24, 4 },
-+ [PORT5_RX_ARES] = { 0x28a24, 3 },
-+ [PORT5_TX_ARES] = { 0x28a24, 2 },
-+ [PORT6_RX_ARES] = { 0x28a24, 1 },
-+ [PORT6_TX_ARES] = { 0x28a24, 0 },
-+ [PPE_FULL_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(20, 17) },
-+ [UNIPHY0_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(23, 14) },
-+ [UNIPHY1_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
-+ [UNIPHY2_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
-+ [UNIPHY_PORT1_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(23, 22) },
-+ [UNIPHY_PORT2_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(21, 20) },
-+ [UNIPHY_PORT3_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(19, 18) },
-+ [UNIPHY_PORT4_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(17, 16) },
-+ [UNIPHY_PORT5_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
-+ [UNIPHY_PORT6_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
-+ [NSSPORT1_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(11, 10) },
-+ [NSSPORT2_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(9, 8) },
-+ [NSSPORT3_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(7, 6) },
-+ [NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
-+ [NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
-+ [NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
-+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
-+};
-+
-+static const struct regmap_config nss_cc_ipq9574_regmap_config = {
-+ .reg_bits = 32,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .max_register = 0x28a34,
-+ .fast_io = true,
-+};
-+
-+static struct qcom_icc_hws_data icc_ipq9574_nss_hws[] = {
-+ { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
-+ { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
-+ { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
-+ { MASTER_NSSNOC_IMEM_QSB, SLAVE_NSSNOC_IMEM_QSB, NSS_CC_NSSNOC_IMEM_QSB_CLK },
-+ { MASTER_NSSNOC_IMEM_AHB, SLAVE_NSSNOC_IMEM_AHB, NSS_CC_NSSNOC_IMEM_AHB_CLK },
-+};
-+
-+#define IPQ_NSSCC_ID (9574 * 2) /* some unique value */
-+
-+static const struct qcom_cc_desc nss_cc_ipq9574_desc = {
-+ .config = &nss_cc_ipq9574_regmap_config,
-+ .clks = nss_cc_ipq9574_clocks,
-+ .num_clks = ARRAY_SIZE(nss_cc_ipq9574_clocks),
-+ .resets = nss_cc_ipq9574_resets,
-+ .num_resets = ARRAY_SIZE(nss_cc_ipq9574_resets),
-+ .icc_hws = icc_ipq9574_nss_hws,
-+ .num_icc_hws = ARRAY_SIZE(icc_ipq9574_nss_hws),
-+ .icc_first_node_id = IPQ_NSSCC_ID,
-+};
-+
-+static const struct of_device_id nss_cc_ipq9574_match_table[] = {
-+ { .compatible = "qcom,ipq9574-nsscc" },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, nss_cc_ipq9574_match_table);
-+
-+static int nss_cc_ipq9574_probe(struct platform_device *pdev)
-+{
-+ struct regmap *regmap;
-+
-+ regmap = qcom_cc_map(pdev, &nss_cc_ipq9574_desc);
-+ if (IS_ERR(regmap))
-+ return PTR_ERR(regmap);
-+
-+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
-+
-+ return qcom_cc_really_probe(&pdev->dev, &nss_cc_ipq9574_desc, regmap);
-+}
-+
-+static struct platform_driver nss_cc_ipq9574_driver = {
-+ .probe = nss_cc_ipq9574_probe,
-+ .driver = {
-+ .name = "qcom,nsscc-ipq9574",
-+ .of_match_table = nss_cc_ipq9574_match_table,
-+ .sync_state = icc_sync_state,
-+ },
-+};
-+
-+module_platform_driver(nss_cc_ipq9574_driver);
-+
-+MODULE_DESCRIPTION("QTI NSS_CC IPQ9574 Driver");
-+MODULE_LICENSE("GPL");
+++ /dev/null
-From 03cbf5e97bf4cd863aff002cb5e6def43f2034d0 Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:19 +0530
-Subject: [PATCH 6/7] arm64: dts: qcom: ipq9574: Add nsscc node
-
-Add a node for the nss clock controller found on ipq9574 based devices.
-
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 22 ++++++++++++++++++++++
- 1 file changed, 22 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -12,6 +12,8 @@
- #include <dt-bindings/interconnect/qcom,ipq9574.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
-+#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
-+#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
- #include <dt-bindings/thermal/thermal.h>
-
- / {
-@@ -1216,6 +1218,25 @@
- status = "disabled";
- };
-
-+ nsscc: clock-controller@39b00000 {
-+ compatible = "qcom,ipq9574-nsscc";
-+ reg = <0x39b00000 0x80000>;
-+ clocks = <&xo_board_clk>,
-+ <&cmn_pll NSS_1200MHZ_CLK>,
-+ <&cmn_pll PPE_353MHZ_CLK>,
-+ <&gcc GPLL0_OUT_AUX>,
-+ <0>,
-+ <0>,
-+ <0>,
-+ <0>,
-+ <0>,
-+ <0>,
-+ <&gcc GCC_NSSCC_CLK>;
-+ #clock-cells = <1>;
-+ #reset-cells = <1>;
-+ #power-domain-cells = <1>;
-+ #interconnect-cells = <1>;
-+ };
- };
-
- thermal-zones {
+++ /dev/null
-From 2a7892782bf2cb84f204702ef47ae2d97be0d46b Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Mon, 9 Dec 2024 18:04:00 +0100
-Subject: [PATCH 2/2] arm64: dts: qcom: fix USB vdda-pll supply for ipq9574 rdp
- common
-
-Fix USB vdda-pll for IPQ9574 SoC common for every RDP board. The correct
-regulator is the l5 one instead of l2 now supported in the smd regulator
-driver.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -75,7 +75,7 @@
- regulator-max-microvolt = <1075000>;
- };
-
-- mp5496_l2: l2 {
-+ mp5496_l5: l5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
-@@ -145,7 +145,7 @@
- };
-
- &usb_0_qmpphy {
-- vdda-pll-supply = <&mp5496_l2>;
-+ vdda-pll-supply = <&mp5496_l5>;
- vdda-phy-supply = <®ulator_fixed_0p925>;
-
- status = "okay";
-@@ -153,7 +153,7 @@
-
- &usb_0_qusbphy {
- vdd-supply = <®ulator_fixed_0p925>;
-- vdda-pll-supply = <&mp5496_l2>;
-+ vdda-pll-supply = <&mp5496_l5>;
- vdda-phy-dpdm-supply = <®ulator_fixed_3p3>;
-
- status = "okay";
+++ /dev/null
-From 3105ff9d7111d15b686b8d14e8b4413a5c2a88ce Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Thu, 1 Feb 2024 13:03:14 +0800
-Subject: [PATCH 13/17] arm64: dts: qcom: ipq9574: Add PCS UNIPHY device tree
- support
-
-The UNIPHY block in the IPQ SoC enables PCS/XPCS functions and helps in
-interfacing the Ethernet MAC to external PHYs.
-
-There are three PCS UNIPHY instances available in the IPQ9574 SoC. The
-first UNIPHY has four PCS channels which can connect to QCA8075 Quad
-PHYs in QSGMII mode or QCA8085 PHYs with 10G-QXGMII mode. The second
-and third UNIPHYs each has one PCS channel which can connect with single
-10G capable PHY such as Aquantia 113c PHY in USXGMII mode.
-
-Change-Id: I7832a71b12730d5bd7926a25f4feda371c09b58e
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 110 +++++++++++++++++++++++++-
- 1 file changed, 109 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -3,7 +3,7 @@
- * IPQ9574 SoC device tree source
- *
- * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
-+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
- #include <dt-bindings/clock/qcom,apss-ipq.h>
-@@ -1237,6 +1237,114 @@
- #power-domain-cells = <1>;
- #interconnect-cells = <1>;
- };
-+
-+ pcsuniphy0: ethernet-uniphy@7a00000 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ compatible = "qcom,ipq9574-uniphy";
-+ reg = <0x7a00000 0x10000>;
-+ clocks = <&gcc GCC_UNIPHY0_SYS_CLK>,
-+ <&gcc GCC_UNIPHY0_AHB_CLK>;
-+ clock-names = "sys",
-+ "ahb";
-+ resets = <&gcc GCC_UNIPHY0_SYS_RESET>,
-+ <&gcc GCC_UNIPHY0_AHB_RESET>,
-+ <&gcc GCC_UNIPHY0_XPCS_RESET>;
-+ reset-names = "sys",
-+ "ahb",
-+ "xpcs";
-+ #clock-cells = <1>;
-+ clock-output-names = "uniphy0_nss_rx_clk",
-+ "uniphy0_nss_tx_clk";
-+
-+ pcsuniphy0_ch0: uniphy-ch@0 {
-+ reg = <0>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT1_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT1_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+
-+ pcsuniphy0_ch1: uniphy-ch@1 {
-+ reg = <1>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT2_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT2_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+
-+ pcsuniphy0_ch2: uniphy-ch@2 {
-+ reg = <2>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT3_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT3_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+
-+ pcsuniphy0_ch3: uniphy-ch@3 {
-+ reg = <3>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT4_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT4_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+ };
-+
-+ pcsuniphy1: ethernet-uniphy@7a10000 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ compatible = "qcom,ipq9574-uniphy";
-+ reg = <0x7a10000 0x10000>;
-+ clocks = <&gcc GCC_UNIPHY1_SYS_CLK>,
-+ <&gcc GCC_UNIPHY1_AHB_CLK>;
-+ clock-names = "sys",
-+ "ahb";
-+ resets = <&gcc GCC_UNIPHY1_SYS_RESET>,
-+ <&gcc GCC_UNIPHY1_AHB_RESET>,
-+ <&gcc GCC_UNIPHY1_XPCS_RESET>;
-+ reset-names = "sys",
-+ "ahb",
-+ "xpcs";
-+ #clock-cells = <1>;
-+ clock-output-names = "uniphy1_nss_rx_clk",
-+ "uniphy1_nss_tx_clk";
-+
-+ pcsuniphy1_ch0: uniphy-ch@0 {
-+ reg = <0>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT5_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT5_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+ };
-+
-+ pcsuniphy2: ethernet-uniphy@7a20000 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ compatible = "qcom,ipq9574-uniphy";
-+ reg = <0x7a20000 0x10000>;
-+ clocks = <&gcc GCC_UNIPHY2_SYS_CLK>,
-+ <&gcc GCC_UNIPHY2_AHB_CLK>;
-+ clock-names = "sys",
-+ "ahb";
-+ resets = <&gcc GCC_UNIPHY2_SYS_RESET>,
-+ <&gcc GCC_UNIPHY2_AHB_RESET>,
-+ <&gcc GCC_UNIPHY2_XPCS_RESET>;
-+ reset-names = "sys",
-+ "ahb",
-+ "xpcs";
-+ #clock-cells = <1>;
-+ clock-output-names = "uniphy2_nss_rx_clk",
-+ "uniphy2_nss_tx_clk";
-+
-+ pcsuniphy2_ch0: uniphy-ch@0 {
-+ reg = <0>;
-+ clocks = <&nsscc NSS_CC_UNIPHY_PORT6_RX_CLK>,
-+ <&nsscc NSS_CC_UNIPHY_PORT6_TX_CLK>;
-+ clock-names = "ch_rx",
-+ "ch_tx";
-+ };
-+ };
- };
-
- thermal-zones {
+++ /dev/null
-From 3e98aaf9e5c6b2206edce3309beb1adeb2b61b60 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 23 Nov 2023 15:41:20 +0800
-Subject: [PATCH 15/17] arm64: dts: qcom: Add IPQ9574 MDIO device node
-
-The MDIO bus master block is used to accessing the MDIO slave
-device (such as PHY device), the dedicated MDIO PINs needs to
-be configured.
-
-Change-Id: Ia64083529e693256dbd8f8af4071c02afdded8f9
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 18 ++++++++++++++++++
- 1 file changed, 18 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -297,6 +297,8 @@
- mdio: mdio@90000 {
- compatible = "qcom,ipq9574-mdio", "qcom,ipq4019-mdio";
- reg = <0x00090000 0x64>;
-+ pinctrl-0 = <&mdio_pins>;
-+ pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&gcc GCC_MDIO_AHB_CLK>;
-@@ -414,6 +416,22 @@
- interrupt-controller;
- #interrupt-cells = <2>;
-
-+ mdio_pins: mdio-pins {
-+ mdc-state {
-+ pins = "gpio38";
-+ function = "mdc";
-+ drive-strength = <8>;
-+ bias-disable;
-+ };
-+
-+ mdio-state {
-+ pins = "gpio39";
-+ function = "mdio";
-+ drive-strength = <8>;
-+ bias-pull-up;
-+ };
-+ };
-+
- uart2_pins: uart2-state {
- pins = "gpio34", "gpio35";
- function = "blsp2_uart";
+++ /dev/null
-From ff847b8692e877e660b64ff2de4f26c6f7ce932e Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Fri, 1 Mar 2024 14:46:45 +0800
-Subject: [PATCH 14/17] arm64: dts: qcom: Add IPQ9574 PPE base device node
-
-PPE is the packet process engine on the Qualcomm IPQ platform,
-which is connected with the external switch or PHY device via
-the UNIPHY (PCS).
-
-Change-Id: I254bd48c218aa4eab54f697a2ad149f5a93b682c
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 38 +++++++++++++++++++++++++++
- 1 file changed, 38 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -1256,6 +1256,44 @@
- #interconnect-cells = <1>;
- };
-
-+ ethernet@3a000000 {
-+ compatible = "qcom,ipq9574-ppe";
-+ reg = <0x3a000000 0xbef800>;
-+ ranges;
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ clocks = <&nsscc NSS_CC_PPE_SWITCH_CLK>,
-+ <&nsscc NSS_CC_PPE_SWITCH_CFG_CLK>,
-+ <&nsscc NSS_CC_PPE_SWITCH_IPE_CLK>,
-+ <&nsscc NSS_CC_PPE_SWITCH_BTQ_CLK>;
-+ clock-names = "ppe",
-+ "ppe_cfg",
-+ "ppe_ipe",
-+ "ppe_btq";
-+ resets = <&nsscc PPE_FULL_RESET>;
-+ interconnects = <&nsscc MASTER_NSSNOC_PPE
-+ &nsscc SLAVE_NSSNOC_PPE>,
-+ <&nsscc MASTER_NSSNOC_PPE_CFG
-+ &nsscc SLAVE_NSSNOC_PPE_CFG>,
-+ <&gcc MASTER_NSSNOC_QOSGEN_REF
-+ &gcc SLAVE_NSSNOC_QOSGEN_REF>,
-+ <&gcc MASTER_NSSNOC_TIMEOUT_REF
-+ &gcc SLAVE_NSSNOC_TIMEOUT_REF>,
-+ <&gcc MASTER_MEM_NOC_NSSNOC
-+ &gcc SLAVE_MEM_NOC_NSSNOC>,
-+ <&gcc MASTER_NSSNOC_MEMNOC
-+ &gcc SLAVE_NSSNOC_MEMNOC>,
-+ <&gcc MASTER_NSSNOC_MEM_NOC_1
-+ &gcc SLAVE_NSSNOC_MEM_NOC_1>;
-+ interconnect-names = "ppe",
-+ "ppe_cfg",
-+ "qos_gen",
-+ "timeout_ref",
-+ "nssnoc_memnoc",
-+ "memnoc_nssnoc",
-+ "memnoc_nssnoc_1";
-+ };
-+
- pcsuniphy0: ethernet-uniphy@7a00000 {
- #address-cells = <1>;
- #size-cells = <0>;
+++ /dev/null
-From 14caaa7a23404cfee65a0d74b61d7998f762c70f Mon Sep 17 00:00:00 2001
-From: Pavithra R <quic_pavir@quicinc.com>
-Date: Wed, 6 Mar 2024 22:29:41 +0530
-Subject: [PATCH 16/17] arm64: dts: qcom: Add EDMA node for IPQ9574
-
-Add EDMA (Ethernet DMA) device tree node for IPQ9574 to
-enable ethernet support.
-
-Change-Id: I87d7c50f2485c8670948dce305000337f6499f8b
-Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 68 +++++++++++++++++++++++++++
- 1 file changed, 68 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -1292,6 +1292,74 @@
- "nssnoc_memnoc",
- "memnoc_nssnoc",
- "memnoc_nssnoc_1";
-+
-+ edma {
-+ compatible = "qcom,ipq9574-edma";
-+ clocks = <&nsscc NSS_CC_PPE_EDMA_CLK>,
-+ <&nsscc NSS_CC_PPE_EDMA_CFG_CLK>;
-+ clock-names = "edma",
-+ "edma-cfg";
-+ resets = <&nsscc EDMA_HW_RESET>;
-+ reset-names = "edma_rst";
-+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "edma_txcmpl_8",
-+ "edma_txcmpl_9",
-+ "edma_txcmpl_10",
-+ "edma_txcmpl_11",
-+ "edma_txcmpl_12",
-+ "edma_txcmpl_13",
-+ "edma_txcmpl_14",
-+ "edma_txcmpl_15",
-+ "edma_txcmpl_16",
-+ "edma_txcmpl_17",
-+ "edma_txcmpl_18",
-+ "edma_txcmpl_19",
-+ "edma_txcmpl_20",
-+ "edma_txcmpl_21",
-+ "edma_txcmpl_22",
-+ "edma_txcmpl_23",
-+ "edma_txcmpl_24",
-+ "edma_txcmpl_25",
-+ "edma_txcmpl_26",
-+ "edma_txcmpl_27",
-+ "edma_txcmpl_28",
-+ "edma_txcmpl_29",
-+ "edma_txcmpl_30",
-+ "edma_txcmpl_31",
-+ "edma_rxdesc_20",
-+ "edma_rxdesc_21",
-+ "edma_rxdesc_22",
-+ "edma_rxdesc_23",
-+ "edma_misc";
-+ };
- };
-
- pcsuniphy0: ethernet-uniphy@7a00000 {
+++ /dev/null
-From 9f3d547ccaf1113244f9aeb1a849e553321869ea Mon Sep 17 00:00:00 2001
-From: Lei Wei <quic_leiwei@quicinc.com>
-Date: Tue, 14 May 2024 10:53:27 +0800
-Subject: [PATCH 17/17] arm64: dts: qcom: Add IPQ9574 RDP433 port node
-
-There are 6 PPE MAC ports available on RDP433. The port1-port4 are
-connected with QCA8075 QUAD PHYs through UNIPHY0 PCS channel0-channel3.
-The port5 is connected with Aquantia PHY through UNIPHY1 PCS channel0
-and the port6 is connected with Aquantia PHY through UNIPHY2 PCS
-channel0.
-
-Change-Id: Ic16efdef2fe2cff7b1e80245619c0f82afb24cb9
-Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 169 +++++++++++++++++++-
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 2 +-
- 2 files changed, 169 insertions(+), 2 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -55,6 +55,46 @@
- status = "okay";
- };
-
-+&mdio {
-+ reset-gpios = <&tlmm 60 GPIO_ACTIVE_LOW>;
-+ clock-frequency = <6250000>;
-+ status = "okay";
-+
-+ ethernet-phy-package@0 {
-+ compatible = "qcom,qca8075-package";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x10>;
-+ qcom,package-mode = "qsgmii";
-+
-+ phy0: ethernet-phy@10 {
-+ reg = <0x10>;
-+ };
-+
-+ phy1: ethernet-phy@11 {
-+ reg = <0x11>;
-+ };
-+
-+ phy2: ethernet-phy@12 {
-+ reg = <0x12>;
-+ };
-+
-+ phy3: ethernet-phy@13 {
-+ reg = <0x13>;
-+ };
-+ };
-+
-+ phy4: ethernet-phy@8 {
-+ compatible ="ethernet-phy-ieee802.3-c45";
-+ reg = <8>;
-+ };
-+
-+ phy5: ethernet-phy@0 {
-+ compatible ="ethernet-phy-ieee802.3-c45";
-+ reg = <0>;
-+ };
-+};
-+
- &sdhc_1 {
- pinctrl-0 = <&sdc_default_state>;
- pinctrl-names = "default";
-@@ -173,3 +213,130 @@
- };
- };
- };
-+
-+&qcom_ppe {
-+ ethernet-ports {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ port@1 {
-+ reg = <1>;
-+ phy-mode = "qsgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy0>;
-+ pcs-handle = <&pcsuniphy0_ch0>;
-+ clocks = <&nsscc NSS_CC_PORT1_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT1_RX_CLK>,
-+ <&nsscc NSS_CC_PORT1_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT1_MAC_ARES>,
-+ <&nsscc PORT1_RX_ARES>,
-+ <&nsscc PORT1_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+
-+ port@2 {
-+ reg = <2>;
-+ phy-mode = "qsgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy1>;
-+ pcs-handle = <&pcsuniphy0_ch1>;
-+ clocks = <&nsscc NSS_CC_PORT2_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT2_RX_CLK>,
-+ <&nsscc NSS_CC_PORT2_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT2_MAC_ARES>,
-+ <&nsscc PORT2_RX_ARES>,
-+ <&nsscc PORT2_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+
-+ port@3 {
-+ reg = <3>;
-+ phy-mode = "qsgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy2>;
-+ pcs-handle = <&pcsuniphy0_ch2>;
-+ clocks = <&nsscc NSS_CC_PORT3_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT3_RX_CLK>,
-+ <&nsscc NSS_CC_PORT3_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT3_MAC_ARES>,
-+ <&nsscc PORT3_RX_ARES>,
-+ <&nsscc PORT3_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+
-+ port@4 {
-+ reg = <4>;
-+ phy-mode = "qsgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy3>;
-+ pcs-handle = <&pcsuniphy0_ch3>;
-+ clocks = <&nsscc NSS_CC_PORT4_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT4_RX_CLK>,
-+ <&nsscc NSS_CC_PORT4_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT4_MAC_ARES>,
-+ <&nsscc PORT4_RX_ARES>,
-+ <&nsscc PORT4_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+
-+ port@5 {
-+ reg = <5>;
-+ phy-mode = "usxgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy4>;
-+ pcs-handle = <&pcsuniphy1_ch0>;
-+ clocks = <&nsscc NSS_CC_PORT5_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT5_RX_CLK>,
-+ <&nsscc NSS_CC_PORT5_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT5_MAC_ARES>,
-+ <&nsscc PORT5_RX_ARES>,
-+ <&nsscc PORT5_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+
-+ port@6 {
-+ reg = <6>;
-+ phy-mode = "usxgmii";
-+ managed = "in-band-status";
-+ phy-handle = <&phy5>;
-+ pcs-handle = <&pcsuniphy2_ch0>;
-+ clocks = <&nsscc NSS_CC_PORT6_MAC_CLK>,
-+ <&nsscc NSS_CC_PORT6_RX_CLK>,
-+ <&nsscc NSS_CC_PORT6_TX_CLK>;
-+ clock-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ resets = <&nsscc PORT6_MAC_ARES>,
-+ <&nsscc PORT6_RX_ARES>,
-+ <&nsscc PORT6_TX_ARES>;
-+ reset-names = "port_mac",
-+ "port_rx",
-+ "port_tx";
-+ };
-+ };
-+};
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -1256,7 +1256,7 @@
- #interconnect-cells = <1>;
- };
-
-- ethernet@3a000000 {
-+ qcom_ppe: ethernet@3a000000 {
- compatible = "qcom,ipq9574-ppe";
- reg = <0x3a000000 0xbef800>;
- ranges;
+++ /dev/null
-From 0c16deea166f6d890ac4aa9a73d28fc64fb26c3d Mon Sep 17 00:00:00 2001
-From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
-Date: Sat, 10 May 2025 14:59:05 -0500
-Subject: [PATCH] net: pcs: ipq-uniphy: fix NULL pointer dereference in probe()
-
-In .probe(), the clocks are stored one-by-one in the priv->clk[]
-array. Later, they are dereferenced directly when calling
-clk_set_rate(). When the clock value is PTR_ERR instead of a valid
-pointer, the system crashes with a NULL pointer dereference.
-
-The problem is seen on IPQ9554, where uniphy1 is not present, and
-cannot be enabled:
-
- gcc_uniphy1_sys_clk status stuck at 'off'
- ...
- ipq_uniphy 7a10000.ethernet-uniphy: Failed to get the clock ID sys
- ...
- Unable to handle kernel read from unreadable memory at virtual address 000000000000002
-
-While disabling the uniphy1 node in devicetree also prevents the
-crash, fixing the driver logic is more generic. Abort .probe() if any
-of the clocks fail to resolve.
-
-Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -1167,9 +1167,11 @@ static int ipq_uniphy_probe(struct platf
- priv->clk[i] = devm_clk_get_optional_enabled(dev,
- pcs_clock_name[i]);
-
-- if (IS_ERR(priv->clk[i]))
-+ if (IS_ERR(priv->clk[i])) {
- dev_err(dev, "Failed to get the clock ID %s\n",
- pcs_clock_name[i]);
-+ return PTR_ERR(priv->clk[i]);
-+ }
- }
-
- for (i = 0; i < PCS_RESET_MAX; i++) {
+++ /dev/null
-From 82a6de914d0d6ff82333a4b65c81f71335e1f9d0 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 4 Dec 2024 01:31:46 +0100
-Subject: [PATCH] dt-bindings: clock: Add clock ID for IPQ PCS UNIPHY
-
-IPQ9574 expose 3 PCS UNIPHY that expose all the same clock ID for RX and
-TX clock. This is needed to correctly parent the EDMA ports and scale
-to the correct frequency to permit correct function of attached PHY.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
- create mode 100644 include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h
-
---- /dev/null
-+++ b/include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h
-@@ -0,0 +1,13 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+
-+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_PCS_UNIPHY_H
-+#define _DT_BINDINGS_CLK_QCOM_IPQ_PCS_UNIPHY_H
-+
-+/*
-+ * IPQ9574 expose 3 PCS UNIPHY that expose all
-+ * the same clock ID
-+ */
-+#define UNIPHY_NSS_RX_CLK 0
-+#define UNIPHY_NSS_TX_CLK 1
-+
-+#endif
+++ /dev/null
-From cb72c5119463897df2ba4a007b490e6251a15f75 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 4 Dec 2024 01:37:05 +0100
-Subject: [PATCH] net: ethernet: qualcomm: Add support for label property for
- EDMA port
-
-Add support for label property for EDMA port. This is useful to define
-custom name in DTS for specific ethernet port instead of assigning a
-dynamic name at runtime.
-
-This also improve the log output by using modern APIs.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/net/ethernet/qualcomm/ppe/edma_port.c | 18 +++++++++++++++---
- 1 file changed, 15 insertions(+), 3 deletions(-)
-
---- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
-@@ -355,13 +355,25 @@ int edma_port_setup(struct ppe_port *por
- int port_id = port->port_id;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
-+ const char *name;
-+ int assign_type;
- int ret = 0;
- u8 *maddr;
-
-- netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
-- EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
-+ name = of_get_property(np, "label", NULL);
-+ if (name) {
-+ assign_type = NET_NAME_PREDICTABLE;
-+ } else {
-+ name = "eth%d";
-+ assign_type = NET_NAME_ENUM;
-+ }
-+
-+ netdev = alloc_netdev_mqs(sizeof(struct edma_port_priv),
-+ name, assign_type,
-+ ether_setup,
-+ EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
- if (!netdev) {
-- pr_err("alloc_etherdev() failed\n");
-+ dev_err(ppe_dev->dev, "alloc_netdev_mqs() failed\n");
- return -ENOMEM;
- }
-
+++ /dev/null
-From a471ccfd5b2c7810506aac71d4eb3616a3fb18f9 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 4 Dec 2024 01:43:20 +0100
-Subject: [PATCH] arm64: dts: qcom: Add missing clock for nsscc from pcs uniphy
-
-Add missing clock for nsscc from PCS UNIPHY to scale frequency of each
-clock based on the requested PHY.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -14,6 +14,7 @@
- #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
- #include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
- #include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
-+#include <dt-bindings/clock/qcom,ipq-pcs-uniphy.h>
- #include <dt-bindings/thermal/thermal.h>
-
- / {
-@@ -1243,12 +1244,12 @@
- <&cmn_pll NSS_1200MHZ_CLK>,
- <&cmn_pll PPE_353MHZ_CLK>,
- <&gcc GPLL0_OUT_AUX>,
-- <0>,
-- <0>,
-- <0>,
-- <0>,
-- <0>,
-- <0>,
-+ <&pcsuniphy0 UNIPHY_NSS_RX_CLK>,
-+ <&pcsuniphy0 UNIPHY_NSS_TX_CLK>,
-+ <&pcsuniphy1 UNIPHY_NSS_RX_CLK>,
-+ <&pcsuniphy1 UNIPHY_NSS_TX_CLK>,
-+ <&pcsuniphy2 UNIPHY_NSS_RX_CLK>,
-+ <&pcsuniphy2 UNIPHY_NSS_TX_CLK>,
- <&gcc GCC_NSSCC_CLK>;
- #clock-cells = <1>;
- #reset-cells = <1>;
+++ /dev/null
-From d9982d245fc7f1b7d65d74cd08d53eccdcbd5cf2 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Mon, 9 Dec 2024 17:50:31 +0100
-Subject: [PATCH 1/2] arm64: dts: qcom: add partition table for ipq9574 rdp
- common
-
-Add partition table for ipq9574 SoC common to every RDB board.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 146 +++++++++++++++++-
- 1 file changed, 145 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -44,11 +44,155 @@
- status = "okay";
-
- flash@0 {
-- compatible = "micron,n25q128a11", "jedec,spi-nor";
-+ compatible = "jedec,spi-nor";
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
- spi-max-frequency = <50000000>;
-+
-+ partitions {
-+ compatible = "fixed-partitions";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+
-+ partition@0 {
-+ label = "0:sbl1";
-+ reg = <0x0 0xc0000>;
-+ read-only;
-+ };
-+
-+ partition@c0000 {
-+ label = "0:mibib";
-+ reg = <0xc0000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@d0000 {
-+ label = "0:bootconfig";
-+ reg = <0xd0000 0x20000>;
-+ read-only;
-+ };
-+
-+ partition@f0000 {
-+ label = "0:bootconfig1";
-+ reg = <0xf0000 0x20000>;
-+ read-only;
-+ };
-+
-+ partition@110000 {
-+ label = "0:qsee";
-+ reg = <0x110000 0x180000>;
-+ read-only;
-+ };
-+
-+ partition@290000 {
-+ label = "0:qsee_1";
-+ reg = <0x290000 0x180000>;
-+ read-only;
-+ };
-+
-+ partition@410000 {
-+ label = "0:devcfg";
-+ reg = <0x410000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@420000 {
-+ label = "0:devcfg_1";
-+ reg = <0x420000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@430000 {
-+ label = "0:apdp";
-+ reg = <0x430000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@440000 {
-+ label = "0:apdp_1";
-+ reg = <0x440000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@450000 {
-+ label = "0:tme";
-+ reg = <0x450000 0x40000>;
-+ read-only;
-+ };
-+
-+ partition@490000 {
-+ label = "0:tme_1";
-+ reg = <0x490000 0x40000>;
-+ read-only;
-+ };
-+
-+ partition@4d0000 {
-+ label = "0:rpm";
-+ reg = <0x4d0000 0x20000>;
-+ read-only;
-+ };
-+
-+ partition@4f0000 {
-+ label = "0:rpm_1";
-+ reg = <0x4f0000 0x20000>;
-+ read-only;
-+ };
-+
-+ partition@510000 {
-+ label = "0:cdt";
-+ reg = <0x510000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@520000 {
-+ label = "0:cdt_1";
-+ reg = <0x520000 0x10000>;
-+ read-only;
-+ };
-+
-+ partition@530000 {
-+ compatible = "u-boot,env";
-+ label = "0:appsblenv";
-+ reg = <0x530000 0x10000>;
-+
-+ macaddr_lan: ethaddr {
-+ #nvmem-cell-cells = <1>;
-+ };
-+ };
-+
-+ partition@540000 {
-+ label = "0:appsbl";
-+ reg = <0x540000 0xa0000>;
-+ read-only;
-+ };
-+
-+ partition@5e0000 {
-+ label = "0:appsbl_1";
-+ reg = <0x5e0000 0xa0000>;
-+ read-only;
-+ };
-+
-+ partition@680000 {
-+ label = "0:art";
-+ reg = <0x680000 0x100000>;
-+ read-only;
-+ };
-+
-+ partition@780000 {
-+ label = "0:ethphyfw";
-+ reg = <0x780000 0x80000>;
-+ read-only;
-+
-+ nvmem-layout {
-+ compatible = "fixed-layout";
-+
-+ aqr_fw: aqr-fw@0 {
-+ reg = <0x0 0x5fc02>;
-+ };
-+ };
-+ };
-+ };
- };
- };
-
+++ /dev/null
-From e7afacf09d39d7087a5ea112fefabb6d5d3adf2b Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Mon, 9 Dec 2024 18:10:43 +0100
-Subject: [PATCH] dts: qcom: add AQR NVMEM node for IPQ9574 RDP433 board
-
-Add Aquantia NVMEM node for IPQ9574 RDP433 board to load the firmware
-for the Aquantia PHY.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -87,11 +87,17 @@
- phy4: ethernet-phy@8 {
- compatible ="ethernet-phy-ieee802.3-c45";
- reg = <8>;
-+
-+ nvmem-cells = <&aqr_fw>;
-+ nvmem-cell-names = "firmware";
- };
-
- phy5: ethernet-phy@0 {
- compatible ="ethernet-phy-ieee802.3-c45";
- reg = <0>;
-+
-+ nvmem-cells = <&aqr_fw>;
-+ nvmem-cell-names = "firmware";
- };
- };
-
+++ /dev/null
-From a0a04487554ea21db5eaf04f81fc159386a7477d Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Mon, 9 Dec 2024 18:19:06 +0100
-Subject: [PATCH] net: ethernet: qualcomm: ppe: Fix unmet dependency with
- QCOM_PPE
-
-Fix unmet dependency with QCOM_PPE on selecting SFP.
-
-WARNING: unmet direct dependencies detected for SFP
- Depends on [m]: NETDEVICES [=y] && PHYLIB [=y] && I2C [=y] && PHYLINK [=y] && (HWMON [=m] || HWMON [=m]=n [=n])
- Selected by [y]:
- - QCOM_PPE [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_QUALCOMM [=y] && HAS_IOMEM [=y] && OF [=y] && COMMON_CLK [=y]
-
-This permit correct compilation of the modules with SFP enabled.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/net/ethernet/qualcomm/Kconfig | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/net/ethernet/qualcomm/Kconfig
-+++ b/drivers/net/ethernet/qualcomm/Kconfig
-@@ -68,7 +68,6 @@ config QCOM_PPE
- select REGMAP_MMIO
- select PHYLINK
- select PCS_QCOM_IPQ_UNIPHY
-- select SFP
- help
- This driver supports the Qualcomm Technologies, Inc. packet
- process engine (PPE) available with IPQ SoC. The PPE houses
+++ /dev/null
-From 2f328bd852cbb27cf0d2cad1727d8fb7a69abe87 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 30 Jan 2025 00:39:30 +0100
-Subject: [PATCH 2/2] arm64: dts: qcom: ipq9574: add QPIC SPI NAND default
- partition nodes
-
-Add QPIC SPI NAND default partition nodes for RDP reference board.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 28 +++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -281,6 +281,34 @@
- nand-ecc-engine = <&qpic_nand>;
- nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
-+
-+ partitions {
-+ compatible = "fixed-partitions";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+
-+ partition@0 {
-+ label = "0:training";
-+ reg = <0x0 0x80000>;
-+ read-only;
-+ };
-+
-+ partition@80000 {
-+ label = "0:license";
-+ reg = <0x80000 0x40000>;
-+ read-only;
-+ };
-+
-+ partition@c0000 {
-+ label = "rootfs";
-+ reg = <0xc0000 0x3c00000>;
-+ };
-+
-+ partition@3cc0000 {
-+ label = "rootfs_1";
-+ reg = <0x3cc0000 0x3c00000>;
-+ };
-+ };
- };
- };
-
+++ /dev/null
-From afba5111aed03a05aa7fd46d3d9911319fa87a29 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 30 Jan 2025 16:07:14 +0100
-Subject: [PATCH 1/3] PM: runtime: add of_pm_clk_add_clk_index OP variant
-
-Add of_pm_clk_add_clk_index OP variant of of_pm_clk_add_clk to take as
-argument the clock index in DT instead of the name. This is to handle
-case where clock-names property is not used by the node but clocks are
-referenced with a dt-binding header or internally in the driver.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/base/power/clock_ops.c | 31 +++++++++++++++++++++++++++++++
- include/linux/pm_clock.h | 1 +
- 2 files changed, 32 insertions(+)
-
---- a/drivers/base/power/clock_ops.c
-+++ b/drivers/base/power/clock_ops.c
-@@ -259,6 +259,37 @@ int pm_clk_add_clk(struct device *dev, s
- }
- EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-+/**
-+ * of_pm_clk_add_clk_index - Start using a device clock for power management.
-+ * @dev: Device whose clock is going to be used for power management.
-+ * @index: Index of clock that is going to be used for power management.
-+ *
-+ * Add the clock described in the 'clocks' device-tree node at the index
-+ * provided, to the list of clocks used for the power management of @dev.
-+ * On success, returns 0. Returns a negative error code if the clock is not
-+ * found or cannot be added.
-+ */
-+int of_pm_clk_add_clk_index(struct device *dev, int index)
-+{
-+ struct clk *clk;
-+ int ret;
-+
-+ if (!dev || !dev->of_node || index < 0)
-+ return -EINVAL;
-+
-+ clk = of_clk_get(dev->of_node, index);
-+ if (IS_ERR(clk))
-+ return PTR_ERR(clk);
-+
-+ ret = pm_clk_add_clk(dev, clk);
-+ if (ret) {
-+ clk_put(clk);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk_index);
-
- /**
- * of_pm_clk_add_clk - Start using a device clock for power management.
---- a/include/linux/pm_clock.h
-+++ b/include/linux/pm_clock.h
-@@ -41,6 +41,7 @@ extern int pm_clk_create(struct device *
- extern void pm_clk_destroy(struct device *dev);
- extern int pm_clk_add(struct device *dev, const char *con_id);
- extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
-+extern int of_pm_clk_add_clk_index(struct device *dev, int index);
- extern int of_pm_clk_add_clk(struct device *dev, const char *name);
- extern int of_pm_clk_add_clks(struct device *dev);
- extern void pm_clk_remove(struct device *dev, const char *con_id);
+++ /dev/null
-From 9408076fd9e4d41876af41523cad9bfa77b3a557 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 30 Jan 2025 16:11:14 +0100
-Subject: [PATCH 2/3] clk: qcom: nsscc: Attach required NSSNOC clock to PM
- domain
-
-There is currently a problem with ICC clock disabling the NSSNOC clock
-as there isn't any user for them on calling sync_state.
-This cause the kernel to stall if NSS is enabled and reboot with the watchdog.
-
-This is caused by the fact that the NSSNOC clock nsscc, snoc and snoc_1
-are actually required to make the NSS work and make the system continue
-booting.
-
-To attach these clock, setup pm-clk in nsscc and setup the correct
-resume/suspend OPs.
-
-With this change, the clock gets correctly attached and are not disabled
-when ICC call the sync_state.
-
-Suggested-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- drivers/clk/qcom/nsscc-ipq9574.c | 49 +++++++++++++++++++++++++++++++-
- 1 file changed, 48 insertions(+), 1 deletion(-)
-
---- a/drivers/clk/qcom/nsscc-ipq9574.c
-+++ b/drivers/clk/qcom/nsscc-ipq9574.c
-@@ -12,6 +12,8 @@
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/of_device.h>
-+#include <linux/pm_clock.h>
-+#include <linux/pm_runtime.h>
- #include <linux/regmap.h>
- #include <linux/platform_device.h>
-
-@@ -41,6 +43,9 @@ enum {
- DT_UNIPHY1_NSS_TX_CLK,
- DT_UNIPHY2_NSS_RX_CLK,
- DT_UNIPHY2_NSS_TX_CLK,
-+ DT_GCC_NSSNOC_NSSCC_CLK,
-+ DT_GCC_NSSNOC_SNOC_CLK,
-+ DT_GCC_NSSNOC_SNOC_1_CLK,
- };
-
- enum {
-@@ -3046,6 +3051,10 @@ static const struct qcom_cc_desc nss_cc_
- .icc_first_node_id = IPQ_NSSCC_ID,
- };
-
-+static const struct dev_pm_ops nsscc_pm_ops = {
-+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
-+};
-+
- static const struct of_device_id nss_cc_ipq9574_match_table[] = {
- { .compatible = "qcom,ipq9574-nsscc" },
- { }
-@@ -3054,7 +3063,33 @@ MODULE_DEVICE_TABLE(of, nss_cc_ipq9574_m
-
- static int nss_cc_ipq9574_probe(struct platform_device *pdev)
- {
-+ struct device *dev = &pdev->dev;
- struct regmap *regmap;
-+ int ret;
-+
-+ ret = devm_pm_runtime_enable(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = devm_pm_clk_create(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = of_pm_clk_add_clk_index(dev, DT_GCC_NSSNOC_NSSCC_CLK);
-+ if (ret)
-+ return dev_err_probe(dev, ret,"failed to acquire nssnoc clock\n");
-+
-+ ret = of_pm_clk_add_clk_index(dev, DT_GCC_NSSNOC_SNOC_CLK);
-+ if (ret)
-+ return dev_err_probe(dev, ret,"failed to acquire snoc clock\n");
-+
-+ ret = of_pm_clk_add_clk_index(dev, DT_GCC_NSSNOC_SNOC_1_CLK);
-+ if (ret)
-+ return dev_err_probe(dev, ret,"failed to acquire snoc_1 clock\n");
-+
-+ ret = pm_runtime_resume_and_get(dev);
-+ if (ret)
-+ return ret;
-
- regmap = qcom_cc_map(pdev, &nss_cc_ipq9574_desc);
- if (IS_ERR(regmap))
-@@ -3062,7 +3097,18 @@ static int nss_cc_ipq9574_probe(struct p
-
- clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
-
-- return qcom_cc_really_probe(&pdev->dev, &nss_cc_ipq9574_desc, regmap);
-+ ret = qcom_cc_really_probe(dev, &nss_cc_ipq9574_desc, regmap);
-+ if (ret)
-+ goto err_put_pm;
-+
-+ pm_runtime_put(dev);
-+
-+ return 0;
-+
-+err_put_pm:
-+ pm_runtime_put_sync(dev);
-+
-+ return ret;
- }
-
- static struct platform_driver nss_cc_ipq9574_driver = {
-@@ -3071,6 +3117,7 @@ static struct platform_driver nss_cc_ipq
- .name = "qcom,nsscc-ipq9574",
- .of_match_table = nss_cc_ipq9574_match_table,
- .sync_state = icc_sync_state,
-+ .pm = &nsscc_pm_ops,
- },
- };
-
+++ /dev/null
-From 893fda72edd2a0b3d92be41af417d315c9c5c253 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Thu, 30 Jan 2025 16:23:03 +0100
-Subject: [PATCH 3/3] arm64: dts: qcom: ipq9574: add NSSNOC clock to nss node
-
-Add NSSNOC clock to nss node to attach the clock with PM clock and fix
-the boot stall after ICC sync_state.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -1250,7 +1250,9 @@
- <&pcsuniphy1 UNIPHY_NSS_TX_CLK>,
- <&pcsuniphy2 UNIPHY_NSS_RX_CLK>,
- <&pcsuniphy2 UNIPHY_NSS_TX_CLK>,
-- <&gcc GCC_NSSCC_CLK>;
-+ <&gcc GCC_NSSNOC_NSSCC_CLK>,
-+ <&gcc GCC_NSSNOC_SNOC_CLK>,
-+ <&gcc GCC_NSSNOC_SNOC_1_CLK>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
+++ /dev/null
-From 145aa2977a42b97d052ed0984fb305a853f55d49 Mon Sep 17 00:00:00 2001
-From: Mantas Pucka <mantas@8devices.com>
-Date: Wed, 11 Apr 2025 15:14:19 +0300
-Subject: [PATCH] clk: qcom: nsscc-ipq9574: enable bus clock
-
-Enable bus clock, otherwise nsscc registers are unaccessible.
-
-Signed-off-by: Mantas Pucka <mantas@8devices.com>
----
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 3 +-
- drivers/clk/qcom/nsscc-ipq9574.c | 5 +
- 2 file changed, 7 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -1252,7 +1252,8 @@
- <&pcsuniphy2 UNIPHY_NSS_TX_CLK>,
- <&gcc GCC_NSSNOC_NSSCC_CLK>,
- <&gcc GCC_NSSNOC_SNOC_CLK>,
-- <&gcc GCC_NSSNOC_SNOC_1_CLK>;
-+ <&gcc GCC_NSSNOC_SNOC_1_CLK>,
-+ <&gcc GCC_NSSCC_CLK>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
---- a/drivers/clk/qcom/nsscc-ipq9574.c
-+++ b/drivers/clk/qcom/nsscc-ipq9574.c
-@@ -46,6 +46,7 @@ enum {
- DT_GCC_NSSNOC_NSSCC_CLK,
- DT_GCC_NSSNOC_SNOC_CLK,
- DT_GCC_NSSNOC_SNOC_1_CLK,
-+ DT_GCC_NSS_BUS_CLK,
- };
-
- enum {
-@@ -3075,6 +3076,10 @@ static int nss_cc_ipq9574_probe(struct p
- if (ret)
- return ret;
-
-+ ret = of_pm_clk_add_clk_index(dev, DT_GCC_NSS_BUS_CLK);
-+ if (ret)
-+ return dev_err_probe(&pdev->dev, ret, "Fail to add bus clock\n");
-+
- ret = of_pm_clk_add_clk_index(dev, DT_GCC_NSSNOC_NSSCC_CLK);
- if (ret)
- return dev_err_probe(dev, ret,"failed to acquire nssnoc clock\n");
+++ /dev/null
-From ce4c7eea1b6f05723240aadc5e1c240d26a6ef88 Mon Sep 17 00:00:00 2001
-From: Mantas Pucka <mantas@8devices.com>
-Date: Mon, 31 Mar 2025 15:39:59 +0300
-Subject: [PATCH] clk: qcom: nsscc-ipq9574: fix port5 clock config
-
-Currently there is no configuration to derive 25/125MHz port5 clock
-from uniphy1 running at 125MHz. This is needed for SGMII mode when
-port5 is using uniphy1.
-
-Fix this by adding option such clock config option.
-
-Signed-off-by: Mantas Pucka <mantas@8devices.com>
----
- drivers/clk/qcom/nsscc-ipq9574.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/drivers/clk/qcom/nsscc-ipq9574.c
-+++ b/drivers/clk/qcom/nsscc-ipq9574.c
-@@ -387,11 +387,13 @@ static const struct freq_multi_tbl ftbl_
-
- static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
- C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY1_NSS_RX_CLK, 5, 0, 0),
- C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
- };
-
- static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
- C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
- C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
- };
-
-@@ -412,11 +414,13 @@ static const struct freq_multi_tbl ftbl_
-
- static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
- C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
-+ C(P_UNIPHY1_NSS_TX_CLK, 5, 0, 0),
- C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
- };
-
- static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
- C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
-+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
- C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
- };
-
+++ /dev/null
-From 4c432babdc195a0dbef70ca67c92cec8adf01e30 Mon Sep 17 00:00:00 2001
-From: Mantas Pucka <mantas@8devices.com>
-Date: Fri, 28 Mar 2025 14:22:21 +0200
-Subject: [PATCH 5/6] net: pcs: ipq-uniphy: keep autoneg enabled in SGMII mode
-
-For PHYs that don't use in-band-status (e.g. 2.5G PHY swiching between
-SGMII and 2500base-x), SGMII autoneg still must be enabled. Only mode
-that should use forced speed is 1000base-x
-
-Signed-off-by: Mantas Pucka <mantas@8devices.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -520,7 +520,7 @@ static int ipq_unipcs_config_sgmii(struc
- mutex_unlock(&qunipcs->shared_lock);
-
- /* In-band autoneg mode is enabled by default for each PCS channel */
-- if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
-+ if (interface != PHY_INTERFACE_MODE_1000BASEX)
- return 0;
-
- /* Force speed mode */
-@@ -758,10 +758,11 @@ ipq_unipcs_link_up_clock_rate_set(struct
- static void ipq_unipcs_link_up_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
- int channel,
- unsigned int neg_mode,
-- int speed)
-+ int speed,
-+ phy_interface_t interface)
- {
- /* No need to config PCS speed if in-band autoneg is enabled */
-- if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
-+ if (interface != PHY_INTERFACE_MODE_1000BASEX)
- goto pcs_adapter_reset;
-
- /* PCS speed set for force mode */
-@@ -966,7 +967,7 @@ static void ipq_unipcs_link_up(struct ph
- case PHY_INTERFACE_MODE_PSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
-- neg_mode, speed);
-+ neg_mode, speed, interface);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- ipq_unipcs_link_up_config_2500basex(qunipcs,
+++ /dev/null
-From 3bbf1aad312de653b894c2e60ea1b37ce912c6fe Mon Sep 17 00:00:00 2001
-From: Mantas Pucka <mantas@8devices.com>
-Date: Fri, 28 Mar 2025 14:10:22 +0200
-Subject: [PATCH 3/6] net: pcs: ipq-uniphy: control MISC2 register for 2.5G
- support
-
-When 2500base-x mode is enabled MISC2 regsister needs to have different
-value than for other 1G modes.
-
-Signed-off-by: Mantas Pucka <mantas@8devices.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -20,6 +20,11 @@
- #define PCS_CALIBRATION 0x1e0
- #define PCS_CALIBRATION_DONE BIT(7)
-
-+#define PCS_MISC2 0x218
-+#define PCS_MISC2_MODE_MASK GENMASK(6, 5)
-+#define PCS_MISC2_MODE_SGMII FIELD_PREP(PCS_MISC2_MODE_MASK, 0x1)
-+#define PCS_MISC2_MODE_SGMII_PLUS FIELD_PREP(PCS_MISC2_MODE_MASK, 0x2)
-+
- #define PCS_MODE_CTRL 0x46c
- #define PCS_MODE_SEL_MASK GENMASK(12, 8)
- #define PCS_MODE_SGMII FIELD_PREP(PCS_MODE_SEL_MASK, 0x4)
-@@ -422,6 +427,9 @@ static int ipq_unipcs_config_mode(struct
- ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
- PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
- PCS_MODE_SGMII);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MISC2,
-+ PCS_MISC2_MODE_MASK,
-+ PCS_MISC2_MODE_SGMII);
- break;
- case PHY_INTERFACE_MODE_QSGMII:
- rate = 125000000;
-@@ -438,17 +446,25 @@ static int ipq_unipcs_config_mode(struct
- PCS_MODE_PSGMII);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
-+ rate = 125000000;
- ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
- PCS_MODE_SEL_MASK |
- PCS_MODE_SGMII_CTRL_MASK,
- PCS_MODE_SGMII |
- PCS_MODE_SGMII_CTRL_1000BASEX);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MISC2,
-+ PCS_MISC2_MODE_MASK,
-+ PCS_MISC2_MODE_SGMII);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- rate = 312500000;
- ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
- PCS_MODE_SEL_MASK,
- PCS_MODE_SGMII_PLUS);
-+ ipq_unipcs_reg_modify32(qunipcs, PCS_MISC2,
-+ PCS_MISC2_MODE_MASK,
-+ PCS_MISC2_MODE_SGMII_PLUS);
-+
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10GBASER:
+++ /dev/null
-From d75aa2977a42b97d052ed0984fb305a853f55d49 Mon Sep 17 00:00:00 2001
-From: Mantas Pucka <mantas@8devices.com>
-Date: Wed, 9 Apr 2025 11:16:49 +0300
-Subject: [PATCH] net: pcs: ipq-uniphy: fix USXGMII link-up failure
-
-USXGMII link-up may fail due to too short delay after PLL reset.
-Increase the delay to fix this.
-
-Signed-off-by: Mantas Pucka <mantas@8devices.com>
----
- drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-+++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
-@@ -490,7 +490,7 @@ static int ipq_unipcs_config_mode(struct
-
- /* PCS PLL reset */
- ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET, PCS_ANA_SW_RESET, 0);
-- fsleep(10000);
-+ fsleep(20000);
- ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET,
- PCS_ANA_SW_RESET, PCS_ANA_SW_RESET);
-
+++ /dev/null
-From a6118ebc98ec0081064ccc01d0d23f029a504d71 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Wed, 4 Dec 2024 01:49:09 +0100
-Subject: [PATCH] arm64: dts: qcom: Add label to EDMA port for IPQ9574 RDP433
-
-Add label to EDMA port for IPQ9574 RDP433 board.
-
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
-@@ -229,6 +229,7 @@
- reg = <1>;
- phy-mode = "qsgmii";
- managed = "in-band-status";
-+ label = "lan1";
- phy-handle = <&phy0>;
- pcs-handle = <&pcsuniphy0_ch0>;
- clocks = <&nsscc NSS_CC_PORT1_MAC_CLK>,
-@@ -249,6 +250,7 @@
- reg = <2>;
- phy-mode = "qsgmii";
- managed = "in-band-status";
-+ label = "lan2";
- phy-handle = <&phy1>;
- pcs-handle = <&pcsuniphy0_ch1>;
- clocks = <&nsscc NSS_CC_PORT2_MAC_CLK>,
-@@ -269,6 +271,7 @@
- reg = <3>;
- phy-mode = "qsgmii";
- managed = "in-band-status";
-+ label = "lan3";
- phy-handle = <&phy2>;
- pcs-handle = <&pcsuniphy0_ch2>;
- clocks = <&nsscc NSS_CC_PORT3_MAC_CLK>,
-@@ -289,6 +292,7 @@
- reg = <4>;
- phy-mode = "qsgmii";
- managed = "in-band-status";
-+ label = "lan4";
- phy-handle = <&phy3>;
- pcs-handle = <&pcsuniphy0_ch3>;
- clocks = <&nsscc NSS_CC_PORT4_MAC_CLK>,
-@@ -309,6 +313,7 @@
- reg = <5>;
- phy-mode = "usxgmii";
- managed = "in-band-status";
-+ label = "lan5";
- phy-handle = <&phy4>;
- pcs-handle = <&pcsuniphy1_ch0>;
- clocks = <&nsscc NSS_CC_PORT5_MAC_CLK>,
-@@ -329,6 +334,7 @@
- reg = <6>;
- phy-mode = "usxgmii";
- managed = "in-band-status";
-+ label = "wan";
- phy-handle = <&phy5>;
- pcs-handle = <&pcsuniphy2_ch0>;
- clocks = <&nsscc NSS_CC_PORT6_MAC_CLK>,