iommu/arm-smmu: Add support for PRR bit setup
Author:     Bibek Kumar Patro <quic_bibekkum@quicinc.com>
AuthorDate: Thu, 12 Dec 2024 15:14:00 +0000 (20:44 +0530)
Commit:     Will Deacon <will@kernel.org>
CommitDate: Tue, 7 Jan 2025 13:55:07 +0000 (13:55 +0000)
Add an adreno-smmu-priv interface for drm/msm to call into arm-smmu-qcom
and initiate the "Partially Resident Region" (PRR) bit setup or reset
sequence on request.

This will be used by the GPU driver to set up the PRR bit and related
configuration registers through the adreno-smmu private interface
instead of poking the SMMU hardware directly.
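
For illustration, a minimal sketch of how the GPU driver could consume
these optional hooks once it holds the adreno_smmu_priv filled in by
arm-smmu-qcom; the helper name and the prr_page argument are
hypothetical and not part of this patch:

    /* Illustration only; needs <linux/adreno-smmu-priv.h> and <linux/mm.h>. */
    static void example_gpu_enable_prr(struct adreno_smmu_priv *adreno_smmu,
                                       struct page *prr_page)
    {
            /* Both hooks stay NULL on SMMU implementations without PRR support. */
            if (!adreno_smmu->set_prr_bit || !adreno_smmu->set_prr_addr)
                    return;

            /* Program the PRR page address first, then set the ACTLR PRR bit. */
            adreno_smmu->set_prr_addr(adreno_smmu->cookie, page_to_phys(prr_page));
            adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
    }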

Suggested-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Bibek Kumar Patro <quic_bibekkum@quicinc.com>
Link: https://lore.kernel.org/r/20241212151402.159102-4-quic_bibekkum@quicinc.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/arm/arm-smmu/arm-smmu.h
include/linux/adreno-smmu-priv.h

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5f3b5dfdcf05b772d6a6630803a77e8295c39f65..5a32d5dcbc8673a2d25022f4fb16740a62ba6c76 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -16,6 +16,8 @@
 
 #define QCOM_DUMMY_VAL -1
 
+#define GFX_ACTLR_PRR          (1 << 5)
+
 static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
 {
        return container_of(smmu, struct qcom_smmu, smmu);
@@ -99,6 +101,47 @@ static void qcom_adreno_smmu_resume_translation(const void *cookie, bool termina
        arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
 }
 
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+       struct arm_smmu_domain *smmu_domain = (void *)cookie;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+       u32 reg = 0;
+       int ret;
+
+       ret = pm_runtime_resume_and_get(smmu->dev);
+       if (ret < 0) {
+               dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+               return;
+       }
+
+       reg =  arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+       reg &= ~GFX_ACTLR_PRR;
+       if (set)
+               reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+       arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+       pm_runtime_put_autosuspend(smmu->dev);
+}
+
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+       struct arm_smmu_domain *smmu_domain = (void *)cookie;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       int ret;
+
+       ret = pm_runtime_resume_and_get(smmu->dev);
+       if (ret < 0) {
+               dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+               return;
+       }
+
+       writel_relaxed(lower_32_bits(page_addr),
+                               smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+       writel_relaxed(upper_32_bits(page_addr),
+                               smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+       pm_runtime_put_autosuspend(smmu->dev);
+}
+
 #define QCOM_ADRENO_SMMU_GPU_SID 0
 
 static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -210,6 +253,7 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
 static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
                struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
 {
+       const struct device_node *np = smmu_domain->smmu->dev->of_node;
        struct adreno_smmu_priv *priv;
 
        smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
@@ -239,6 +283,14 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
        priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
        priv->set_stall = qcom_adreno_smmu_set_stall;
        priv->resume_translation = qcom_adreno_smmu_resume_translation;
+       priv->set_prr_bit = NULL;
+       priv->set_prr_addr = NULL;
+
+       if (of_device_is_compatible(np, "qcom,smmu-500") &&
+                       of_device_is_compatible(np, "qcom,adreno-smmu")) {
+               priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+               priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+       }
 
        return 0;
 }
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index e2aeb511ae903302e3c15d2cf5f22e2a26ac2346..2dbf3243b5ad2db01e17fb26c26c838942a491be 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -154,6 +154,8 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_SCTLR_M               BIT(0)
 
 #define ARM_SMMU_CB_ACTLR              0x4
+#define ARM_SMMU_GFX_PRR_CFG_LADDR     0x6008
+#define ARM_SMMU_GFX_PRR_CFG_UADDR     0x600C
 
 #define ARM_SMMU_CB_RESUME             0x8
 #define ARM_SMMU_RESUME_TERMINATE      BIT(0)
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
index c637e0997f6d8eedcc42a03a9d303700f62f8cf2..abec23c7744f49bea70f3352da9385304ed3702e 100644
--- a/include/linux/adreno-smmu-priv.h
+++ b/include/linux/adreno-smmu-priv.h
@@ -50,6 +50,11 @@ struct adreno_smmu_fault_info {
  *                 the GPU driver must call resume_translation()
  * @resume_translation: Resume translation after a fault
  *
+ * @set_prr_bit:   [optional] Configure the GPU's Partially Resident
+ *                 Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr:  [optional] Configure the PRR_CFG_*ADDR register with
+ *                 the physical address of the PRR page passed from the
+ *                 GPU driver.
  *
  * The GPU driver (drm/msm) and adreno-smmu work together for controlling
  * the GPU's SMMU instance.  This is by necessity, as the GPU is directly
@@ -67,6 +72,8 @@ struct adreno_smmu_priv {
     void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
     void (*set_stall)(const void *cookie, bool enabled);
     void (*resume_translation)(const void *cookie, bool terminate);
+    void (*set_prr_bit)(const void *cookie, bool set);
+    void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
 };
 
 #endif /* __ADRENO_SMMU_PRIV_H */
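
For context on how these optional hooks reach drm/msm: in the existing
adreno-smmu-priv flow, the GPU driver publishes its struct
adreno_smmu_priv as drvdata on the GPU device, and
qcom_adreno_smmu_init_context() fills in the callbacks (including the
two new ones above when the SMMU supports PRR). A minimal sketch of
that registration side, assuming this flow; example_gpu and
example_gpu_bind are hypothetical names:

    /*
     * Illustration only. Needs <linux/adreno-smmu-priv.h>,
     * <linux/device.h> and <linux/slab.h>.
     */
    struct example_gpu {
            struct adreno_smmu_priv adreno_smmu;
            /* ... other GPU driver state ... */
    };

    static int example_gpu_bind(struct device *dev)
    {
            struct example_gpu *gpu;

            gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
            if (!gpu)
                    return -ENOMEM;

            /* Must be set before the SMMU context for this device is initialized. */
            dev_set_drvdata(dev, &gpu->adreno_smmu);

            return 0;
    }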