return 0;
}
+static bool a6xx_aqe_is_enabled(struct adreno_gpu *adreno_gpu)
+{
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /*
+ * AQE uses the preemption context record as a scratchpad, so AQE is
+ * usable only when preemption is enabled (more than one ring) and the
+ * AQE BO was successfully allocated
+ */
+ return (adreno_gpu->base.nr_rings > 1) && !!a6xx_gpu->aqe_bo;
+}
+
static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
.bus_halt = a6xx_bus_clear_pending_transactions,
.mmu_fault_handler = a6xx_fault_handler,
.gx_is_on = a7xx_gmu_gx_is_on,
+ .aqe_is_enabled = a6xx_aqe_is_enabled,
};
const struct adreno_gpu_funcs a8xx_gpu_funcs = {
.bus_halt = a8xx_bus_clear_pending_transactions,
.mmu_fault_handler = a8xx_fault_handler,
.gx_is_on = a8xx_gmu_gx_is_on,
+ .aqe_is_enabled = a6xx_aqe_is_enabled,
};
case MSM_PARAM_HAS_PRR:
*value = adreno_smmu_has_prr(gpu);
return 0;
+ case MSM_PARAM_AQE:
+ *value = !!(adreno_gpu->funcs->aqe_is_enabled &&
+ adreno_gpu->funcs->aqe_is_enabled(adreno_gpu));
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
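Userspace can probe the new read-only param through the standard MSM get_param path; on kernels without this patch the ioctl fails with EINVAL, which callers should treat as AQE being unavailable. A minimal libdrm sketch follows (the msm_has_aqe() helper name is made up for illustration; MSM_PARAM_AQE comes from the updated msm_drm.h, mirrored here in case the installed header predates this change):

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

#ifndef MSM_PARAM_AQE
#define MSM_PARAM_AQE 0x17 /* RO, mirrors the define added below */
#endif

/* Hypothetical helper: returns 0 and sets *enabled on success, or a
 * negative errno (-EINVAL on kernels that don't know the param).
 */
static int msm_has_aqe(int fd, bool *enabled)
{
	struct drm_msm_param req = {
		.pipe = MSM_PIPE_3D0,
		.param = MSM_PARAM_AQE,
	};
	int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM,
				      &req, sizeof(req));

	if (ret)
		return ret;

	*enabled = !!req.value;
	return 0;
}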
void (*bus_halt)(struct adreno_gpu *adreno_gpu, bool gx_off);
int (*mmu_fault_handler)(void *arg, unsigned long iova, int flags, void *data);
bool (*gx_is_on)(struct adreno_gpu *adreno_gpu);
+ bool (*aqe_is_enabled)(struct adreno_gpu *adreno_gpu);
};
struct adreno_reglist {
* ioctl will throw -EPIPE.
*/
#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
+#define MSM_PARAM_AQE 0x17 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #