struct amdgpu_device *adev = obj->adev;
struct amdgpu_ras_block_object *block_obj =
amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!block_obj)
+ if (!block_obj || !con)
return;
/* both query_poison_status and handle_poison_consumption are optional,
 * check whether block has implemented it, poison_stat by default should
 * be handled
 */
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
- /* gpu reset is fallback for failed and default cases */
- if (poison_stat) {
+ /* gpu reset is the fallback for failed and default cases.
+ * For the RMA case, amdgpu_umc_poison_handler handles the gpu reset.
+ */
+ if (poison_stat && !con->is_rma) {
dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
block_obj->ras_comm.name);
amdgpu_ras_reset_gpu(adev);
- } else {
- amdgpu_gfx_poison_consumption_handler(adev, entry);
}
+
+ if (!poison_stat)
+ amdgpu_gfx_poison_consumption_handler(adev, entry);
}
static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
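Read as a whole, the hunk above decouples two decisions that the old if/else tied together: whether the block's consumption handler warrants a GPU reset, and whether the gfx consumption handler should run. Under RMA the reset is skipped here because the poison-creation path owns it. Below is a minimal userspace sketch of the resulting decision; the names are illustrative stand-ins, not driver symbols.

    #include <stdbool.h>

    enum consumption_action {
        ACTION_GPU_RESET,     /* handler consumed the poison: reset now */
        ACTION_DEFER_TO_RMA,  /* RMA: the creation path issues the reset */
        ACTION_GFX_HANDLER,   /* no handler result: hand off to gfx */
    };

    /* Mirrors the two independent ifs above: poison_stat picks the branch,
     * is_rma only suppresses the reset, never the gfx hand-off.
     */
    enum consumption_action consumption_action(bool poison_stat, bool is_rma)
    {
        if (!poison_stat)
            return ACTION_GFX_HANDLER;

        return is_rma ? ACTION_DEFER_TO_RMA : ACTION_GPU_RESET;
    }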
static void amdgpu_ras_do_page_retirement(struct work_struct *work)
{
struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
+ unsigned long err_cnt;
if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery))
return;
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
+ err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
+ if (err_cnt && con->is_rma)
+ amdgpu_ras_reset_gpu(adev);
+
mutex_lock(&con->umc_ecc_log.lock);
if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
UMC_ECC_NEW_DETECTED_TAG))
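One ordering detail in this hunk is easy to miss: err_data.err_addr_cnt is copied into the new err_cnt local before amdgpu_ras_error_data_fini() tears the structure down, so the RMA reset check never reads finalized state. A standalone sketch of the same copy-before-fini pattern, with stand-in types and names:

    #include <stdlib.h>

    struct err_data_sketch {
        unsigned long err_addr_cnt;
        void *err_addr;             /* owned; released by fini */
    };

    void err_data_fini(struct err_data_sketch *d)
    {
        free(d->err_addr);
        d->err_addr = NULL;
        d->err_addr_cnt = 0;        /* the count dies with the data */
    }

    /* Returns nonzero when the caller should escalate to a gpu reset. */
    int retire_and_check(struct err_data_sketch *d, int is_rma)
    {
        /* copy the count out while the structure is still live */
        unsigned long err_cnt = d->err_addr_cnt;

        err_data_fini(d);

        /* safe: the decision uses the saved copy, not the dead struct */
        return err_cnt && is_rma;
    }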
if (poison_msg->pasid_fn)
poison_msg->pasid_fn(adev, pasid, poison_msg->data);
- if (reset) {
+ /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
+ if (reset && !con->is_rma) {
flush_delayed_work(&con->page_retirement_dwork);
con->gpu_reset_flags |= reset;
static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* mode1 reset is the only option for RMA status */
+ if (ras->is_rma) {
+ ras->gpu_reset_flags = 0;
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ }
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
return 0;
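Two properties of this hunk carry the weight: for RMA the accumulated flags are overwritten so mode1 is the only reset that can be scheduled, and the pre-existing atomic_cmpxchg on in_recovery lets exactly one racing caller queue the recovery work. A C11 sketch of that single-winner guard (names are stand-ins, not driver symbols):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int in_recovery;

    /* Exactly one caller sees the 0 -> 1 transition and schedules
     * recovery; later callers or racing error paths find it already
     * set and back off.
     */
    bool try_schedule_recovery(void)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(&in_recovery, &expected, 1);
    }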
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
- if (err_data->ue_count && reset) {
+ if ((err_data->ue_count || err_data->de_count) &&
+ (reset || (con && con->is_rma))) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
.block = AMDGPU_RAS_BLOCK__UMC,
};
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint32_t timeout = timeout_ms;
memset(&err_data, 0, sizeof(err_data));
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (reset) {
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
+ if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
(entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
!entry->vmid && !entry->pasid) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint32_t rlc_status0 = 0;
rlc_status0 = RREG32_SOC15(GC, 0, regRLC_RLCS_FED_STATUS_0);
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
- amdgpu_ras_reset_gpu(adev);
+ if (con && !con->is_rma)
+ amdgpu_ras_reset_gpu(adev);
}
return 0;
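Across all the hunks the patch converges on one arbitration rule: once is_rma is set, interrupt-time consumption paths (including the gfx11 FED handler above) no longer issue resets themselves, and the page-retirement path that does is forced to mode1. A hedged summary of that rule as a pure function, with stand-in names:

    #include <stdbool.h>

    enum reset_mode { RESET_NONE, RESET_MODE1, RESET_MODE2 };

    /* from_consumption: called from a poison-consumption interrupt path;
     * requested: the mode a non-RMA caller would have asked for.
     */
    enum reset_mode effective_reset(bool is_rma, bool from_consumption,
                                    enum reset_mode requested)
    {
        if (!is_rma)
            return requested;

        /* RMA: consumption paths defer; the retirement path forces mode1 */
        return from_consumption ? RESET_NONE : RESET_MODE1;
    }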