* if the function should allocate a new one.
* @tile: The tile to select for migration of this bo, and the tile used for
* GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
- * @resv: Pointer to a locked shared reservation object to use fo this bo,
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
* or NULL for the xe_bo to use its own.
* @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
* @size: The storage size to use for the bo.
* @type: The TTM buffer object type.
* @flags: XE_BO_FLAG_ flags.
- * @intr: Whether to execut any waits for backing store interruptible.
+ * @intr: Whether to execute any waits for backing store interruptibly.
*
* Create a pinned and mapped bo. The bo will be external and not associated
* with a VM.
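A minimal sketch of what such a creation looks like from a caller's point of
view, assuming the related in-tree helper xe_bo_create_pin_map() and a couple
of illustrative XE_BO_FLAG_ values; the exact helper name, signature and flag
set are assumptions, not taken from this excerpt:

	/* Hypothetical caller: allocate a pinned, vmapped kernel BO, placed in
	 * VRAM on discrete parts and mapped in the GGTT. */
	struct xe_bo *bo;

	bo = xe_bo_create_pin_map(xe, tile, NULL /* no VM */, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);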
* BO management
* =============
*
- * TTM manages (placement, eviction, etc...) all BOs in XE.
+ * TTM manages (placement, eviction, etc...) all BOs in Xe.
*
* BO creation
* ===========
* a kernel BO (e.g. engine state, memory for page tables, etc...). These BOs
* are typically mapped in the GGTT (any kernel BOs aside from memory for page tables
* are in the GGTT), are pinned (can't move or be evicted at runtime), have a
- * vmap (XE can access the memory via xe_map layer) and have contiguous physical
+ * vmap (Xe can access the memory via xe_map layer) and have contiguous physical
* memory.
*
* More details of why kernel BOs are pinned and contiguous below.
* A user BO is created via the DRM_IOCTL_XE_GEM_CREATE IOCTL. Once it is
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
- * user BOs are evictable and user BOs are never pinned by XE. The allocation of
+ * user BOs are evictable and user BOs are never pinned by Xe. The allocation of
* the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
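To make the create/mmap flow concrete, a rough userspace sketch follows. It
assumes the drm_xe_gem_create and drm_xe_gem_mmap_offset structs from the
xe_drm.h uAPI header; field names, the caching mode and the placement mask are
assumptions to verify against the installed headers:

	#include <sys/mman.h>
	#include <xf86drm.h>
	#include "xe_drm.h"	/* uAPI header; include path varies */

	/* Hypothetical helper: create a user BO and CPU-map it. The backing
	 * store may still be deferred until first use. */
	static void *create_and_map_bo(int fd, __u64 size, __u32 placement)
	{
		struct drm_xe_gem_create create = {
			.size = size,
			.placement = placement,	/* bitmask of memory regions */
			.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
		};
		struct drm_xe_gem_mmap_offset mmo = {};

		if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
			return NULL;

		mmo.handle = create.handle;
		if (drmIoctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
			return NULL;

		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, mmo.offset);
	}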
*
* ====================
*
* All eviction (or in other words, moving a BO from one memory location to
- * another) is routed through TTM with a callback into XE.
+ * another) is routed through TTM with a callback into Xe.
*
* Runtime eviction
* ----------------
* Overview
* ========
*
- * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
* configfs subsystem called ``xe`` that creates a directory in the mounted
* configfs directory. The user can create devices under this directory and
* configure them as necessary. See Documentation/filesystems/configfs.rst for
*
* /sys/bus/pci/devices/<device>/survivability_mode
*
- * - Admin/userpsace consumer can use firmware flashing tools like fwupd to flash
+ * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
* firmware and restore device to normal operation.
*/
};
/**
- * struct xe_device - Top level struct of XE device
+ * struct xe_device - Top level struct of Xe device
*/
struct xe_device {
/** @drm: drm device */
u32 media_verx100;
/** @info.mem_region_mask: mask of valid memory regions */
u32 mem_region_mask;
- /** @info.platform: XE platform enum */
+ /** @info.platform: Xe platform enum */
enum xe_platform platform;
- /** @info.subplatform: XE subplatform enum */
+ /** @info.subplatform: Xe subplatform enum */
enum xe_subplatform subplatform;
/** @info.devid: device ID */
u16 devid;
};
/**
- * struct xe_file - file handle for XE driver
+ * struct xe_file - file handle for Xe driver
*/
struct xe_file {
/** @xe: xe DEVICE **/
* - Binding at exec time
* - Flow controlling the ring at exec time
*
- * In XE we avoid all of this complication by not allowing a BO list to be
+ * In Xe we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, having binds as
* separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
};
/**
- * struct xe_force_wake_domain - XE force wake domains
+ * struct xe_force_wake_domain - Xe force wake domains
*/
struct xe_force_wake_domain {
/** @id: domain force wake id */
};
/**
- * struct xe_force_wake - XE force wake
+ * struct xe_force_wake - Xe force wake
*/
struct xe_force_wake {
/** @gt: back pointers to GT */
* - act_freq: The actual resolved frequency decided by PCODE.
* - cur_freq: The current one requested by GuC PC to the PCODE.
* - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
- * - rpa_freq: The Render Performance (RP) A level, which is the achiveable one.
+ * - rpa_freq: The Render Performance (RP) A level, which is the achievable one.
* Calculated by PCODE at runtime based on multiple running conditions
* - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
* Calculated by PCODE at runtime based on multiple running conditions
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
- smp_wmb(); /* Ensure above writes visable before wake */
+ smp_wmb(); /* Ensure above writes visible before wake */
xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
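For the barrier to do its job it has to pair with a read-side barrier in the
woken waiter. A minimal sketch of that counterpart, with a hypothetical helper
name (the actual waiter code is not part of this excerpt):

	static bool vf_recovery_pending(struct xe_gt *gt)
	{
		/* Pairs with the smp_wmb() before xe_guc_ct_wake_waiters() */
		smp_rmb();

		return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
	}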
* struct xe_guc_ads - GuC additional data structures (ADS)
*/
struct xe_guc_ads {
- /** @bo: XE BO for GuC ads blob */
+ /** @bo: Xe BO for GuC ads blob */
struct xe_bo *bo;
/** @golden_lrc_size: golden LRC size */
size_t golden_lrc_size;
* for the H2G and G2H requests sent and received through the buffers.
*/
struct xe_guc_ct {
- /** @bo: XE BO for CT */
+ /** @bo: Xe BO for CT */
struct xe_bo *bo;
/** @lock: protects everything in CT layer */
struct mutex lock;
struct xe_guc_log {
/** @level: GuC log level */
u32 level;
- /** @bo: XE BO for GuC log */
+ /** @bo: Xe BO for GuC log */
struct xe_bo *bo;
/** @stats: logging related stats */
struct {
}
/*
- * All of these functions are an abstraction layer which other parts of XE can
+ * All of these functions form an abstraction layer which other parts of Xe can
* use to trap into the GuC backend. All of these functions, aside from init,
* really shouldn't do much other than trap into the DRM scheduler which
* synchronizes these operations.
* @guc: GuC object
* @tlb_inval: TLB invalidation client
*
- * Inititialize GuC TLB invalidation by setting back pointer in TLB invalidation
+ * Initialize GuC TLB invalidation by setting back pointer in TLB invalidation
* client to the GuC and setting GuC backend ops.
*/
void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
* DOC: Map layer
*
* All access to any memory shared with a device (both sysmem and vram) in the
- * XE driver should go through this layer (xe_map). This layer is built on top
+ * Xe driver should go through this layer (xe_map). This layer is built on top
* of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
- * and with extra hooks into the XE driver that allows adding asserts to memory
+ * and with extra hooks into the Xe driver that allow adding asserts to memory
* accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
*/
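As a minimal sketch of going through this layer instead of dereferencing a
vmap directly, assuming the xe_map_memset()/xe_map_wr() helpers from xe_map.h
and that the BO's mapping lives in bo->vmap (both are assumptions here):

	/* Hypothetical example: clear the first page of a kernel BO, then
	 * write a 32-bit marker at offset 0, all via the xe_map layer. */
	xe_map_memset(xe, &bo->vmap, 0, 0, SZ_4K);
	xe_map_wr(xe, &bo->vmap, 0, u32, 0xc0ffee);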
*
* Copy from an array dma addresses to a VRAM device physical address
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
*
* Copy from a VRAM device physical address to an array dma addresses
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
/**
* DOC: Migrate Layer
*
- * The XE migrate layer is used generate jobs which can copy memory (eviction),
+ * The Xe migrate layer is used to generate jobs which can copy memory (eviction),
* clear memory, or program tables (binds). This layer exists in every GT, has
* a migrate engine, and uses a special VM for all generated jobs.
*
/**
* xe_pm_might_block_on_suspend() - Annotate that the code might block on suspend
*
- * Annotation to use where the code might block or sieze to make
+ * Annotation to use where the code might block or cease to make
* progress pending resume completion.
*/
void xe_pm_might_block_on_suspend(void)
struct xe_exec_queue;
/**
- * struct xe_preempt_fence - XE preempt fence
+ * struct xe_preempt_fence - Xe preempt fence
*
* hardware and triggers a callback once the xe_engine is complete.
*/
struct xe_range_fence_tree;
struct xe_range_fence;
-/** struct xe_range_fence_ops - XE range fence ops */
+/** struct xe_range_fence_ops - Xe range fence ops */
struct xe_range_fence_ops {
/** @free: free range fence op */
void (*free)(struct xe_range_fence *rfence);
};
-/** struct xe_range_fence - XE range fence (address conflict tracking) */
+/** struct xe_range_fence - Xe range fence (address conflict tracking) */
struct xe_range_fence {
/** @rb: RB tree node inserted into interval tree */
struct rb_node rb;
}
/**
- * xe_sched_job_destroy - Destroy XE schedule job
- * @ref: reference to XE schedule job
+ * xe_sched_job_destroy - Destroy Xe schedule job
+ * @ref: reference to Xe schedule job
*
* Called when ref == 0, drop a reference to job's xe_engine + fence, cleanup
- * base DRM schedule job, and free memory for XE schedule job.
+ * base DRM schedule job, and free memory for Xe schedule job.
*/
void xe_sched_job_destroy(struct kref *ref)
{
void xe_sched_job_destroy(struct kref *ref);
/**
- * xe_sched_job_get - get reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_get - get reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Increment XE schedule job's reference count
+ * Increment Xe schedule job's reference count
*/
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
}
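Taken together with xe_sched_job_put() below, this follows the usual kref
pattern; a minimal, hypothetical caller-side sketch:

	/* Keep the job alive across an asynchronous completion path. */
	struct xe_sched_job *ref = xe_sched_job_get(job);

	/* ... use ref; once done, drop it. xe_sched_job_destroy() runs
	 * automatically when the last reference goes away. */
	xe_sched_job_put(ref);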
/**
- * xe_sched_job_put - put reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_put - put reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
+ * Decrement Xe schedule job's reference count, call xe_sched_job_destroy when
* reference count == 0.
*/
static inline void xe_sched_job_put(struct xe_sched_job *job)
};
/**
- * struct xe_sched_job - XE schedule job (batch buffer tracking)
+ * struct xe_sched_job - Xe schedule job (batch buffer tracking)
*/
struct xe_sched_job {
/** @drm: base DRM scheduler job */
/*
* XXX: We can't derive the GT here (or anywhere in this function), but
- * compute always uses the primary GT so accumlate stats on the likely
+ * compute always uses the primary GT so accumulate stats on the likely
* GT of the fault.
*/
if (gt)
* xe_tlb_inval_fence_wait() - TLB invalidation fence wait
* @fence: TLB invalidation fence to wait on
*
- * Wait on a TLB invalidiation fence until it signals, non interruptable
+ * Wait on a TLB invalidation fence until it signals, non-interruptible
*/
static inline void
xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
#include <drm/ttm/ttm_device.h>
/**
- * struct xe_ttm_vram_mgr - XE TTM VRAM manager
+ * struct xe_ttm_vram_mgr - Xe TTM VRAM manager
*
* Manages placement of TTM resource in VRAM.
*/
};
/**
- * struct xe_ttm_vram_mgr_resource - XE TTM VRAM resource
+ * struct xe_ttm_vram_mgr_resource - Xe TTM VRAM resource
*/
struct xe_ttm_vram_mgr_resource {
/** @base: Base TTM resource */
};
/**
- * struct xe_uc_fw_version - Version for XE micro controller firmware
+ * struct xe_uc_fw_version - Version for Xe micro controller firmware
*/
struct xe_uc_fw_version {
/** @branch: branch version of the FW (not always available) */
};
/**
- * struct xe_uc_fw - XE micro controller firmware
+ * struct xe_uc_fw - Xe micro controller firmware
*/
struct xe_uc_fw {
/** @type: type of uC firmware */
/** @size: size of uC firmware including css header */
size_t size;
- /** @bo: XE BO for uC firmware */
+ /** @bo: Xe BO for uC firmware */
struct xe_bo *bo;
/** @has_gsc_headers: whether the FW image starts with GSC headers */
#include "xe_wopcm_types.h"
/**
- * struct xe_uc - XE micro controllers
+ * struct xe_uc - Xe micro controllers
*/
struct xe_uc {
/** @guc: Graphics micro controller */
* @request_exclusive: Whether to lock exclusively (write mode) the next time
* the domain lock is locked.
* @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
- * @nr: The drm_exec nr parameter used for drm_exec (re-)initializaiton.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
*/
struct xe_validation_ctx {
struct drm_exec *exec;
* @_ret: The current error value possibly holding -ENOMEM
*
* Use this in a way similar to drm_exec_retry_on_contention().
- * If @_ret contains -ENOMEM the tranaction is restarted once in a way that
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
* blocks other transactions and allows exhaustive eviction. If the transaction
* was already restarted once, just return the -ENOMEM. May also set
* _ret to -EINTR if not retrying and waits are interruptible.
* @_val: The xe_validation_device.
* @_exec: The struct drm_exec object
* @_flags: Flags for the xe_validation_ctx initialization.
- * @_ret: Return in / out parameter. May be set by this macro. Typicall 0 when called.
+ * @_ret: Return in / out parameter. May be set by this macro. Typically 0 when called.
*
* This macro will initiate a drm_exec transaction with additional support for
* exhaustive eviction.
*
* (re)bind SVM range setting up GPU page tables for the range.
*
- * Return: dma fence for rebind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
*
* Unbind SVM range removing the GPU page tables for the range.
*
- * Return: dma fence for unbind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
* selection of options. The user PAT index is only for encoding leaf
* nodes, where we have use of more bits to do the encoding. The
* non-leaf nodes are instead under driver control so the chosen index
- * here should be distict from the user PAT index. Also the
+ * here should be distinct from the user PAT index. Also the
* corresponding coherency of the PAT index should be tied to the
* allocation type of the page table (or at least we should pick
* something which is always safe).
/**
* xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
- * @xe: Pointer to the XE device structure
+ * @xe: Pointer to the Xe device structure
* @vma: Pointer to the virtual memory area (VMA) structure
* @is_atomic: In pagefault path and atomic operation
*
xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
} else if (__op->op == DRM_GPUVA_OP_MAP) {
vma = op->map.vma;
- /* In case of madvise call, MAP will always be follwed by REMAP.
+ /* In case of a madvise call, MAP will always be followed by REMAP.
* Therefore temp_attr will always have sane values, making it safe to
* copy them to new vma.
*/
#define _XE_VM_DOC_H_
/**
- * DOC: XE VM (user address space)
+ * DOC: Xe VM (user address space)
*
* VM creation
* ===========
* User pointers are user allocated memory (malloc'd, mmap'd, etc..) for which the
* user wants to create a GPU mapping. Typically in other DRM drivers a dummy BO
* was created and then a binding was created. We bypass creating a dummy BO in
- * XE and simply create a binding directly from the userptr.
+ * Xe and simply create a binding directly from the userptr.
*
* Invalidation
* ------------
*
* Since this is core kernel managed memory, the kernel can move this memory
- * whenever it wants. We register an invalidation MMU notifier to alert XE when
+ * whenever it wants. We register an invalidation MMU notifier to alert Xe when
* a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
* idle to ensure no faults. This is done by waiting on all of the VM's dma-resv slots.
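A rough sketch of that invalidation idea, using the generic
mmu_interval_notifier and dma-resv APIs; the example_* names are hypothetical
and the driver's real notifier and locking are more involved:

	#include <linux/dma-resv.h>
	#include <linux/mmu_notifier.h>

	struct example_userptr {
		struct mmu_interval_notifier notifier;
		struct dma_resv *vm_resv;	/* the VM's reservation object */
	};

	static bool example_userptr_invalidate(struct mmu_interval_notifier *mni,
					       const struct mmu_notifier_range *range,
					       unsigned long cur_seq)
	{
		struct example_userptr *up =
			container_of(mni, struct example_userptr, notifier);

		if (!mmu_notifier_range_blockable(range))
			return false;

		mmu_interval_set_seq(mni, cur_seq);

		/* Block until all pending GPU users of the userptr are idle */
		dma_resv_wait_timeout(up->vm_resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);

		return true;
	}

	static const struct mmu_interval_notifier_ops example_userptr_ops = {
		.invalidate = example_userptr_invalidate,
	};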
* =======
*
* VM locking protects all of the core data paths (bind operations, execs,
- * evictions, and compute mode rebind worker) in XE.
+ * evictions, and compute mode rebind worker) in Xe.
*
* Locks
* -----
* struct xe_vma_mem_attr - memory attributes associated with vma
*/
struct xe_vma_mem_attr {
- /** @preferred_loc: perferred memory_location */
+ /** @preferred_loc: preferred memory_location */
struct {
/** @preferred_loc.migration_policy: Pages migration policy */
u32 migration_policy;
u64 tlb_flush_seqno;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
- /** @xef: XE file handle for tracking this VM's drm client */
+ /** @xef: Xe file handle for tracking this VM's drm client */
struct xe_file *xef;
};