#define QCOM_DUMMY_VAL -1
+#define GFX_ACTLR_PRR (1 << 5)
+
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}
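+
+/*
+ * Set or clear the Partially Resident Region (PRR) bit in the context
+ * bank's ACTLR register. The SMMU may be runtime suspended, so take a
+ * runtime PM reference around the register access.
+ */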
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	u32 reg = 0;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(smmu->dev);
+	if (ret < 0) {
+		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+		return;
+	}
+
+	reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+	reg &= ~GFX_ACTLR_PRR;
+	if (set)
+		reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+	pm_runtime_put_autosuspend(smmu->dev);
+}
+
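+/*
+ * Program the PRR_CFG_LADDR/UADDR pair with the physical address of the
+ * PRR page handed over by the GPU driver. These registers live in the
+ * global register space, hence the writel_relaxed() on smmu->base
+ * rather than a context bank accessor.
+ */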
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(smmu->dev);
+	if (ret < 0) {
+		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+		return;
+	}
+
+	writel_relaxed(lower_32_bits(page_addr),
+		       smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+	writel_relaxed(upper_32_bits(page_addr),
+		       smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+	pm_runtime_put_autosuspend(smmu->dev);
+}
+
#define QCOM_ADRENO_SMMU_GPU_SID 0
static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+	const struct device_node *np = smmu_domain->smmu->dev->of_node;
	struct adreno_smmu_priv *priv;
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->resume_translation = qcom_adreno_smmu_resume_translation;
+	priv->set_prr_bit = NULL;
+	priv->set_prr_addr = NULL;
+
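+	/*
+	 * The PRR registers only exist on SMMU-500 based Adreno SMMU
+	 * implementations, so expose the hooks only when both compatibles
+	 * match.
+	 */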
+	if (of_device_is_compatible(np, "qcom,smmu-500") &&
+	    of_device_is_compatible(np, "qcom,adreno-smmu")) {
+		priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+		priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+	}
	return 0;
}
* the GPU driver must call resume_translation()
* @resume_translation: Resume translation after a fault
*
+ * @set_prr_bit: [optional] Configure the GPU's Partially Resident
+ * Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR registers with
+ *                the physical address of the PRR page passed from the
+ *                GPU driver.
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
	void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
	void (*set_stall)(const void *cookie, bool enabled);
	void (*resume_translation)(const void *cookie, bool terminate);
+	void (*set_prr_bit)(const void *cookie, bool set);
+	void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
};
#endif /* __ADRENO_SMMU_PRIV_H */
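
For context, here is a minimal consumer sketch of the two new hooks; it is not part of the patch. The helper name gpu_enable_prr(), its error policy, and the choice of a dedicated zeroed scratch page are illustrative assumptions, not drm/msm code; only the adreno_smmu_priv callbacks and the cookie come from the header above.

#include <linux/adreno-smmu-priv.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io.h>

/* Hypothetical helper: allocate a zeroed scratch page and point the
 * SMMU's PRR machinery at it. The caller keeps the page alive for as
 * long as the PRR bit stays set. */
static int gpu_enable_prr(struct adreno_smmu_priv *adreno_smmu,
			  struct page **prr_page)
{
	struct page *page;

	/* Both hooks are optional; SMMUs without PRR leave them NULL. */
	if (!adreno_smmu->set_prr_addr || !adreno_smmu->set_prr_bit)
		return -EOPNOTSUPP;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	/* Program the backing page first, then flip the enable bit. */
	adreno_smmu->set_prr_addr(adreno_smmu->cookie, page_to_phys(page));
	adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);

	*prr_page = page;
	return 0;
}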