int intel_connector_register(struct drm_connector *_connector)
{
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
ret = intel_panel_register(connector);
if (ret)
- goto err;
-
- if (i915_inject_probe_failure(i915)) {
- ret = -EFAULT;
- goto err_panel;
- }
+ return ret;
intel_connector_debugfs_add(connector);
return 0;
-
-err_panel:
- intel_panel_unregister(connector);
-err:
- return ret;
}
+ALLOW_ERROR_INJECTION(intel_connector_register, ERRNO);
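For reference, ALLOW_ERROR_INJECTION() (<linux/error-injection.h>) adds the function to the kernel's error-injection whitelist, so with CONFIG_FUNCTION_ERROR_INJECTION the generic fail_function fault-injection framework can force an errno return at function entry; this replaces the driver-private checkpoint counter removed throughout this patch. A minimal sketch of arming an injection from userspace, following Documentation/fault-injection/fault-injection.rst (the target function and errno here are illustrative, and debugfs must be mounted):

# list functions whitelisted via ALLOW_ERROR_INJECTION(), with their error type
cat /sys/kernel/debug/fail_function/injectable
# arm a one-shot failure: make intel_connector_register() return -ENODEV (-19)
echo intel_connector_register > /sys/kernel/debug/fail_function/inject
printf %#x -19 > /sys/kernel/debug/fail_function/intel_connector_register/retval
echo 100 > /sys/kernel/debug/fail_function/probability
echo 1 > /sys/kernel/debug/fail_function/times
# clear the whole setting afterwards
echo > /sys/kernel/debug/fail_function/inject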
void intel_connector_unregister(struct drm_connector *_connector)
{
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (i915_inject_probe_failure(i915))
- return -ENODEV;
-
if (HAS_DISPLAY(display)) {
ret = drm_vblank_init(display->drm,
INTEL_NUM_PIPES(display));
return ret;
}
+ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO);
static void set_display_access(struct intel_display *display,
bool any_task_allowed,
drm_WARN_ON(&i915->drm, engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
- if (i915_inject_probe_failure(i915))
- return -ENODEV;
-
for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
setup_logical_ids(gt, logical_ids, class);
intel_engines_free(gt);
return err;
}
+ALLOW_ERROR_INJECTION(intel_engines_init_mmio, ERRNO);
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
{
int err;
- err = i915_inject_probe_error(gt->i915, -ENODEV);
- if (err)
- return err;
-
intel_gt_init_workarounds(gt);
/*
if (err)
goto err_gt;
- err = i915_inject_probe_error(gt->i915, -EIO);
- if (err)
- goto err_gt;
-
intel_uc_init_late(&gt->uc);
intel_migrate_init(&gt->migrate, gt);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return err;
}
+ALLOW_ERROR_INJECTION(intel_gt_init, ERRNO);
void intel_gt_driver_remove(struct intel_gt *gt)
{
#define gt_probe_error(_gt, _fmt, ...) \
do { \
- if (i915_error_injected()) \
- gt_dbg(_gt, _fmt, ##__VA_ARGS__); \
- else \
- gt_err(_gt, _fmt, ##__VA_ARGS__); \
+ gt_err(_gt, _fmt, ##__VA_ARGS__); \
} while (0)
#define gt_WARN(_gt, _condition, _fmt, ...) \
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
- /* Verify that we can handle failure to setup the wa_ctx */
- if (!err)
- err = i915_inject_probe_error(engine->i915, -ENODEV);
-
err_unpin:
if (err)
i915_vma_unpin(wa_ctx->vma);
GEM_BUG_ON(huc_fw_size >= wopcm_size);
GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm_size);
- if (i915_inject_probe_failure(i915))
- return;
-
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
u32 *cmds;
int err;
- err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
- if (err)
- return err;
-
GEM_BUG_ON(ct->vma);
blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
return 0;
}
+ALLOW_ERROR_INJECTION(intel_guc_ct_init, ERRNO);
/**
* intel_guc_ct_fini - Fini buffer-based communication
if (ct->dead_ct_reported)
return;
- if (i915_error_injected())
- return;
-
ct->dead_ct_reported = true;
guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
if (intel_huc_is_authenticated(huc, type))
return -EEXIST;
- ret = i915_inject_probe_error(gt->i915, -ENXIO);
- if (ret)
- goto fail;
-
switch (type) {
case INTEL_HUC_AUTH_BY_GUC:
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
int ret;
u32 guc_status;
- ret = i915_inject_probe_error(gt->i915, -ENXIO);
- if (ret)
- return ret;
-
ret = intel_reset_guc(gt);
if (ret) {
gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
static int guc_enable_communication(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
- ret = i915_inject_probe_error(i915, -ENXIO);
- if (ret)
- return ret;
-
ret = intel_guc_ct_enable(&guc->ct);
if (ret)
return ret;
if (!intel_uc_uses_guc(uc))
return 0;
- if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
- return -ENOMEM;
-
ret = intel_guc_init(guc);
if (ret)
return ret;
return 0;
}
+ALLOW_ERROR_INJECTION(__uc_init, ERRNO);
static void __uc_fini(struct intel_uc *uc)
{
GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
-
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
size | GUC_WOPCM_SIZE_LOCKED);
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
-static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
-{
- struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
- bool user = e == -EINVAL;
-
- if (i915_inject_probe_error(i915, e)) {
- /* non-existing blob */
- uc_fw->file_selected.path = "<invalid>";
- uc_fw->user_overridden = user;
- } else if (i915_inject_probe_error(i915, e)) {
- /* require next major version */
- uc_fw->file_wanted.ver.major += 1;
- uc_fw->file_wanted.ver.minor = 0;
- uc_fw->user_overridden = user;
- } else if (i915_inject_probe_error(i915, e)) {
- /* require next minor version */
- uc_fw->file_wanted.ver.minor += 1;
- uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.ver.major &&
- i915_inject_probe_error(i915, e)) {
- /* require prev major version */
- uc_fw->file_wanted.ver.major -= 1;
- uc_fw->file_wanted.ver.minor = 0;
- uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.ver.minor &&
- i915_inject_probe_error(i915, e)) {
- /* require prev minor version - hey, this should work! */
- uc_fw->file_wanted.ver.minor -= 1;
- uc_fw->user_overridden = user;
- } else if (user && i915_inject_probe_error(i915, e)) {
- /* officially unsupported platform */
- uc_fw->file_wanted.ver.major = 0;
- uc_fw->file_wanted.ver.minor = 0;
- uc_fw->user_overridden = true;
- }
-}
-
static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
{
/* Get version numbers from the CSS header */
return -EINVAL;
}
- return i915_inject_probe_error(gt->i915, -EINVAL);
+ return 0;
}
static int check_fw_header(struct intel_gt *gt,
GEM_BUG_ON(!gt->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
- err = i915_inject_probe_error(i915, -ENXIO);
- if (err)
- goto fail;
-
- __force_fw_fetch_failures(uc_fw, -EINVAL);
- __force_fw_fetch_failures(uc_fw, -ESTALE);
-
err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
u64 offset;
int ret;
- ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
- if (ret)
- return ret;
-
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
- struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
int err;
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
- err = i915_inject_probe_error(gt->i915, -ENOEXEC);
- if (err)
- return err;
-
if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
void *vaddr;
int err;
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
-
if (!uc_fw_need_rsa_in_memory(uc_fw))
return 0;
i915_vma_unpin_and_release(&vma, 0);
return err;
}
+ALLOW_ERROR_INJECTION(uc_fw_rsa_data_create, ERRNO);
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
struct intel_display *display = dev_priv->display;
int ret = 0;
- if (i915_inject_probe_failure(dev_priv))
- return -ENODEV;
-
intel_device_info_runtime_init_early(dev_priv);
intel_step_init(dev_priv);
i915_workqueues_cleanup(dev_priv);
return ret;
}
+ALLOW_ERROR_INJECTION(i915_driver_early_probe, ERRNO);
/**
* i915_driver_late_release - cleanup the setup done in
struct intel_gt *gt;
int ret, i;
- if (i915_inject_probe_failure(dev_priv))
- return -ENODEV;
-
ret = i915_gmch_bridge_setup(dev_priv);
if (ret < 0)
return ret;
return ret;
}
+ALLOW_ERROR_INJECTION(i915_driver_mmio_probe, ERRNO);
/**
* i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
- if (i915_inject_probe_failure(dev_priv))
- return -ENODEV;
-
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
i915_perf_fini(dev_priv);
return ret;
}
+ALLOW_ERROR_INJECTION(i915_driver_hw_probe, ERRNO);
/**
* i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-i915_param_named_unsafe(inject_probe_failure, uint, 0400,
- "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
-#endif
-
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(unsigned int, reset, 3, 0600) \
- param(unsigned int, inject_probe_failure, 0, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
param(unsigned int, lmem_size, 0, 0400) \
if (err)
return err;
- if (i915_inject_probe_failure(pdev_to_i915(pdev))) {
- i915_pci_remove(pdev);
- return -ENODEV;
- }
-
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
return 0;
}
+ALLOW_ERROR_INJECTION(i915_pci_probe, ERRNO);
static void i915_pci_shutdown(struct pci_dev *pdev)
{
drm_notice(&i915->drm, "CI tainted: %#x by %pS\n",
taint, __builtin_return_address(0));
- /* Failures that occur during fault injection testing are expected */
- if (!i915_error_injected())
- __add_taint_for_CI(taint);
+ __add_taint_for_CI(taint);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-static unsigned int i915_probe_fail_count;
-
-int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
-{
- if (i915_probe_fail_count >= i915_modparams.inject_probe_failure)
- return 0;
-
- if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
- return 0;
-
- drm_info(&i915->drm, "Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_probe_failure, func, line);
-
- i915_modparams.inject_probe_failure = 0;
- return err;
-}
-
-bool i915_error_injected(void)
-{
- return i915_probe_fail_count && !i915_modparams.inject_probe_failure;
-}
-
-#endif
-
bool i915_vtd_active(struct drm_i915_private *i915)
{
if (device_iommu_mapped(i915->drm.dev))
__stringify(x), (long)(x))
#endif
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
- const char *func, int line);
-#define i915_inject_probe_error(_i915, _err) \
- __i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
-bool i915_error_injected(void);
-
-#else
-
-#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
-#define i915_error_injected() false
-
-#endif
-
-#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
-
#define i915_probe_error(i915, fmt, ...) ({ \
- if (i915_error_injected()) \
- drm_dbg(&(i915)->drm, fmt, ##__VA_ARGS__); \
- else \
- drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
+ drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
#ifndef fetch_and_zero
*/
int intel_gvt_init(struct drm_i915_private *dev_priv)
{
- if (i915_inject_probe_failure(dev_priv))
- return -ENODEV;
-
mutex_lock(&intel_gvt_mutex);
list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
if (intel_gvt_ops)
GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
GEM_BUG_ON(uncore->fw_domain[domain_id]);
- if (i915_inject_probe_failure(uncore->i915))
- return -ENOMEM;
-
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
return 0;
}
+ALLOW_ERROR_INJECTION(__fw_domain_init, ERRNO);
static void fw_domain_fini(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id)