struct amdgpu_job *job)
{
struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ struct amdgpu_device *adev = ring->adev;
unsigned int i;
int r;
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
-/* VA hole for 48bit addresses on Vega10 */
-#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
-#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
+/* VA hole for 48bit and 57bit addresses */
+#define AMDGPU_GMC_HOLE_START (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
+ 0x0100000000000000ULL : 0x0000800000000000ULL)
+#define AMDGPU_GMC_HOLE_END (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
+ 0xff00000000000000ULL : 0xffff800000000000ULL)
/*
 * Hardware is programmed as if the hole doesn't exist with start and end
 * address values.
 *
- * This mask is used to remove the upper 16bits of the VA and so come up with
- * the linear addr value.
+ * This mask is used to remove the upper, sign-extended bits of the VA and so
+ * come up with the linear addr value.
 */
-#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+#define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
+ 0x00ffffffffffffffULL : 0x0000ffffffffffffULL)
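For reference, the new PDB3 constants are the canonical-hole bounds around bit 56, exactly as the existing 48-bit values are the bounds around bit 47. A small standalone check (illustrative only, not part of the patch) makes the relationship explicit:

_Static_assert(0x0000800000000000ULL == 1ULL << 47, "48-bit hole start");
_Static_assert(0xffff800000000000ULL == ~((1ULL << 47) - 1), "48-bit hole end");
_Static_assert(0x0100000000000000ULL == 1ULL << 56, "57-bit hole start");
_Static_assert(0xff00000000000000ULL == ~((1ULL << 56) - 1), "57-bit hole end");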
/*
 * Ring size as power of two for the log of recent faults.
 */

/**
 * amdgpu_gmc_sign_extend - sign extend the given gmc address
 *
 * @addr: address to extend
 */
-static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
-{
- if (addr >= AMDGPU_GMC_HOLE_START)
- addr |= AMDGPU_GMC_HOLE_END;
-
- return addr;
-}
+#define amdgpu_gmc_sign_extend(addr) ((addr) >= AMDGPU_GMC_HOLE_START ?\
+ ((addr) | AMDGPU_GMC_HOLE_END) : (addr))
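Note that, as a macro, the argument is now evaluated twice, so callers should avoid passing expressions with side effects. For illustration, a standalone sketch of the sign-extension behaviour using fixed 48-bit constants (the names below are local stand-ins; the in-tree macro picks the bounds through adev->vm_manager.root_level):

#include <stdint.h>
#include <stdio.h>

#define HOLE_START_48	0x0000800000000000ULL
#define HOLE_END_48	0xffff800000000000ULL
/* Mirror of the macro's logic, hard-wired to the 4-level case. */
#define sign_extend_48(addr) \
	((addr) >= HOLE_START_48 ? ((addr) | HOLE_END_48) : (addr))

int main(void)
{
	/* Below the hole: returned unchanged. */
	printf("%#llx\n", (unsigned long long)sign_extend_48(0x0000100000000000ULL));
	/* At or above the hole start: the upper bits are filled in, giving
	 * the canonical (sign-extended) form of the address.
	 */
	printf("%#llx\n", (unsigned long long)sign_extend_48(0x0000900000000000ULL));
	return 0;
}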
bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
return 0;
}
-int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue,
u64 addr, u64 expected_size)
{
struct amdgpu_bo_va_mapping *va_map;
db_info.doorbell_offset = args->in.doorbell_offset;
/* Validate the userq virtual address.*/
- if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
- amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
- amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
r = -EINVAL;
kfree(queue);
goto unlock;
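The extra adev parameter is presumably needed so the VA-hole macros, which now read adev->vm_manager.root_level, can be used inside the helper. The driver's validation itself looks up the VA in the VM's mappings (note the amdgpu_bo_va_mapping above); purely to illustrate checking a user-supplied VA range against the hole, here is a minimal user-space sketch (all names and the exact policy are assumptions for the example, not the driver's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-device VA layout. */
struct va_layout {
	uint64_t hole_start;	/* first non-canonical address */
	uint64_t hole_end;	/* first canonical address above the hole */
};

/* Reject empty, wrapping or hole-crossing ranges. */
static bool va_range_ok(const struct va_layout *l, uint64_t addr, uint64_t size)
{
	uint64_t end = addr + size;

	if (size == 0 || end < addr)
		return false;

	return end <= l->hole_start || addr >= l->hole_end;
}

int main(void)
{
	struct va_layout l48 = { 0x0000800000000000ULL, 0xffff800000000000ULL };

	printf("%d\n", va_range_ok(&l48, 0x1000, 0x2000));			/* 1 */
	printf("%d\n", va_range_ok(&l48, 0x00007ffffffff000ULL, 0x2000));	/* 0 */
	return 0;
}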
void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
-int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue,
u64 addr, u64 expected_size);
int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
/**
* amdgpu_userq_fence_read_wptr - Read the userq wptr value
*
+ * @adev: amdgpu_device pointer
* @queue: user mode queue structure pointer
* @wptr: write pointer value
*
* Returns wptr value on success, error on failure.
*/
-static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue,
u64 *wptr)
{
struct amdgpu_bo_va_mapping *mapping;
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
struct drm_amdgpu_userq_signal *args = data;
goto put_gobj_write;
}
- r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
if (r)
goto put_gobj_write;
* Set the internal MC address mask. This is the max address of the GPU's
* internal address space.
*/
- adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+ adev->gmc.mc_mask = AMDGPU_GMC_HOLE_MASK;
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
}
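With the mask now derived from the hole definition, the MC mask is unchanged on 4-level configurations and simply widens on PDB3 ones. A trivial standalone check (illustrative only):

_Static_assert(0x0000ffffffffffffULL == 0xffffffffffffULL,
	       "4-level: AMDGPU_GMC_HOLE_MASK equals the old hard-coded 48-bit MC mask");
_Static_assert(0x00ffffffffffffffULL == (1ULL << 56) - 1,
	       "PDB3: the MC mask widens to 56 bits");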
static int
-mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
+ struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue,
uint64_t wptr)
{
goto free_mqd;
}
- r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
2048);
if (r)
goto free_mqd;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
- r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
shadow_info.shadow_size);
if (r)
goto free_mqd;
- r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
shadow_info.csa_size);
if (r)
goto free_mqd;
r = -ENOMEM;
goto free_mqd;
}
- r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
32);
if (r)
goto free_mqd;
}
/* FW expects WPTR BOs to be mapped into GART */
- r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ r = mes_userq_create_wptr_mapping(adev, uq_mgr, queue, userq_props->wptr_gpu_addr);
if (r) {
DRM_ERROR("Failed to create WPTR mapping\n");
goto free_ctx;
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_device *adev = parser->adev;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_va_mapping *map;
uint32_t *msg, num_buffers;
struct amdgpu_bo *bo;
uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_va_mapping *map;
uint32_t *msg, num_buffers;
struct amdgpu_bo *bo;
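These last hunks only add a local adev; nothing else in these functions changes. The reason is that AMDGPU_GMC_HOLE_START/END/MASK and amdgpu_gmc_sign_extend() now expand to expressions that name adev, so every function that uses them needs an adev identifier in scope, which these locals provide. A minimal illustration of the pattern (all names below are made up for the example):

#include <stdint.h>
#include <stdio.h>

struct dev { int root_level; };

/* Like the patched macros, this expands to an expression that refers to a
 * local called "adev"; any user must have such a variable, or the code
 * will not compile.
 */
#define HOLE_MASK (adev->root_level == 3 ? \
		   0x00ffffffffffffffULL : 0x0000ffffffffffffULL)

static uint64_t to_linear(struct dev *adev, uint64_t addr)
{
	return addr & HOLE_MASK;	/* compiles because "adev" is the parameter */
}

int main(void)
{
	struct dev d = { .root_level = 3 };

	printf("%#llx\n", (unsigned long long)to_linear(&d, ~0ULL));
	return 0;
}

The alternative would be a helper that takes the device explicitly (say, a hypothetical amdgpu_gmc_hole_mask(adev)), which avoids the hidden dependency on a local name; the macro form keeps the existing spelling of the constants at the cost of threading adev through every user, which is what most of this patch does.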