Rename xe_guc_send_mmio to xe_guc_mmio_send so it aligns with the xe_guc_ct_send naming.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Philippe Lecluse <philippe.lecluse1@gmail.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
XE_GUC_ACTION_CLIENT_SOFT_RESET,
};
- ret = xe_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
if (ret) {
drm_err(&guc_to_xe(guc)->drm,
"GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
#define MEDIA_SOFT_SCRATCH(n) _MMIO(0x190310 + (n) * 4)
#define MEDIA_SOFT_SCRATCH_COUNT 4
-int xe_guc_send_mmio(struct xe_guc *guc, const u32 *request, u32 len)
+int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
XE_BUG_ON(len == 1 && upper_32_bits(val));
/* Self config must go over MMIO */
- ret = xe_guc_send_mmio(guc, request, ARRAY_SIZE(request));
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
if (unlikely(ret < 0))
return ret;
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
-int xe_guc_send_mmio(struct xe_guc *guc, const u32 *request, u32 len);
+int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len);
int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val);
int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val);
void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir);
enable ? GUC_CTB_CONTROL_ENABLE :
GUC_CTB_CONTROL_DISABLE),
};
- int ret = xe_guc_send_mmio(ct_to_guc(ct), request, ARRAY_SIZE(request));
+ int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
size,
};
- return xe_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ return xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
}
static int guc_hwconfig_size(struct xe_guc *guc, u32 *size)