} while (fault->timestamp < tmp);
}
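+
+/**
+ * amdgpu_gmc_handle_retry_fault - common handling for recoverable page faults
+ * @adev: amdgpu device pointer
+ * @entry: IV entry that reported the fault
+ * @addr: faulting GPU virtual address
+ * @cam_index: retry CAM entry to acknowledge, used only when the CAM is enabled
+ * @node_id: node ID the fault was reported on
+ * @write_fault: true if the fault was caused by a write access
+ *
+ * Returns 1 if the fault was consumed (filtered out, delegated to another IH
+ * ring or fixed up by filling the page tables), which also prevents the IV
+ * from being forwarded to the KFD. Returns 0 otherwise.
+ */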
+int amdgpu_gmc_handle_retry_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ u64 addr,
+ u32 cam_index,
+ u32 node_id,
+ bool write_fault)
+{
+ int ret;
+
+ if (adev->irq.retry_cam_enabled) {
+ /* Delegate the fault to the soft IH ring if the hardware
+ * hasn't already routed it to a different ring.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
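+ /* The retry CAM filters out duplicate faults in hardware,
+ * so no software filtering is needed here.
+ */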
+ ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
+ addr, entry->timestamp, write_fault);
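+ /* Write the CAM index back to the CAM doorbell to release
+ * the CAM entry for reuse.
+ */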
+ WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
+ if (ret)
+ return 1;
+ } else {
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate the fault to the soft IH ring if the hardware
+ * hasn't already routed it to a different ring.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page fault by filling the
+ * page tables.
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
+ addr, entry->timestamp, write_fault))
+ return 1;
+ }
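+ /* Fault not consumed, the caller may forward the IV to the KFD */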
+ return 0;
+}
+
int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
{
int r;
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
+int amdgpu_gmc_handle_retry_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ u64 addr,
+ u32 cam_index,
+ u32 node_id,
+ bool write_fault);
int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (retry_fault) {
+ int ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, 0, 0,
+ write_fault);
+
/* Returning 1 here also prevents sending the IV to the KFD */
-
- /* Process it only if it's the first fault for this address */
- if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
- entry->timestamp))
- return 1;
-
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
-
- /* Try to handle the recoverable page faults by filling page
- * tables
- */
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
- entry->timestamp, write_fault))
+ if (ret == 1)
return 1;
}
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (retry_fault) {
+ int ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, 0, 0,
+ write_fault);
+
/* Returning 1 here also prevents sending the IV to the KFD */
-
- /* Process it only if it's the first fault for this address */
- if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
- entry->timestamp))
- return 1;
-
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
-
- /* Try to handle the recoverable page faults by filling page
- * tables
- */
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
- entry->timestamp, write_fault))
+ if (ret == 1)
return 1;
}
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
if (retry_fault) {
+ int ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, 0, 0,
+ write_fault);
+
/* Returning 1 here also prevents sending the IV to the KFD */
-
- /* Process it only if it's the first fault for this address */
- if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
- entry->timestamp))
- return 1;
-
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
-
- /* Try to handle the recoverable page faults by filling page
- * tables
- */
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
- entry->timestamp, write_fault))
+ if (ret == 1)
return 1;
}
hub = &adev->vmhub[vmhub];
if (retry_fault) {
- if (adev->irq.retry_cam_enabled) {
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
-
- cam_index = entry->src_data[2] & 0x3ff;
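+ /* The CAM index is carried in the low 10 bits of src_data[2] */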
+ cam_index = entry->src_data[2] & 0x3ff;
- ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, entry->timestamp, write_fault);
- WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
- if (ret)
- return 1;
- } else {
- /* Process it only if it's the first fault for this address */
- if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
- entry->timestamp))
- return 1;
-
- /* Delegate it to a different ring if the hardware hasn't
- * already done it.
- */
- if (entry->ih == &adev->irq.ih) {
- amdgpu_irq_delegate(adev, entry, 8);
- return 1;
- }
-
- /* Try to handle the recoverable page faults by filling page
- * tables
- */
- if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, entry->timestamp, write_fault))
- return 1;
- }
+ ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, cam_index, node_id,
+ write_fault);
+ /* Returning 1 here also prevents sending the IV to the KFD */
+ if (ret == 1)
+ return 1;
}
if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))