#ifndef _XE_GT_STATS_H_
#define _XE_GT_STATS_H_

+#include <linux/ktime.h>
+
#include "xe_gt_stats_types.h"

struct xe_gt;
}
#endif
+
+/**
+ * xe_gt_stats_ktime_us_delta() - Get delta in microseconds between now and a
+ * start time
+ * @start: Start time
+ *
+ * Helper for GT stats to get the delta in microseconds between now and a
+ * start time. Compiles out (returns 0) if GT stats are disabled.
+ *
+ * Return: Delta in microseconds between now and @start, or 0 if GT stats
+ * are disabled
+ */
+static inline s64 xe_gt_stats_ktime_us_delta(ktime_t start)
+{
+	return IS_ENABLED(CONFIG_DEBUG_FS) ?
+		ktime_us_delta(ktime_get(), start) : 0;
+}
+
+/**
+ * xe_gt_stats_ktime_get() - Get current ktime
+ *
+ * Helper for GT stats to get the current ktime. Compiles out (returns 0) if
+ * GT stats are disabled.
+ *
+ * Return: Current ktime, or 0 if GT stats are disabled
+ */
+static inline ktime_t xe_gt_stats_ktime_get(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
+
#endif
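/*
 * Illustrative usage of the two helpers above (a minimal sketch, not part
 * of this patch): bracket a timed section and feed the elapsed microseconds
 * into a GT stats counter. xe_gt_stats_incr() and
 * XE_GT_STATS_ID_SVM_TLB_INVAL_US are existing driver symbols also used in
 * the xe_svm.c changes below; do_tlb_inval() is a hypothetical stand-in for
 * the timed work.
 */
static void example_timed_section(struct xe_gt *gt)
{
	ktime_t start = xe_gt_stats_ktime_get();	/* 0 when stats disabled */

	do_tlb_inval(gt);	/* hypothetical stand-in for the timed work */

	/* With !CONFIG_DEBUG_FS the delta folds to 0 at compile time */
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US,
			 xe_gt_stats_ktime_us_delta(start));
}
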
				mmu_range);
}
-static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
-{
-	return IS_ENABLED(CONFIG_DEBUG_FS) ?
-		ktime_us_delta(ktime_get(), start) : 0;
-}
-
static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
{
-	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+	s64 us_delta = xe_gt_stats_ktime_us_delta(start);

	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
}
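/*
 * Why this compiles out (illustrative note, not part of this patch):
 * IS_ENABLED(CONFIG_DEBUG_FS) is a compile-time constant, so with stats
 * disabled us_delta above is always 0 and the xe_gt_stats_incr() call
 * resolves to the empty !CONFIG_DEBUG_FS stub declared in xe_gt_stats.h.
 */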
-static ktime_t xe_svm_stats_ktime_get(void)
-{
-	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
-}
-
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_range *r, *first;
	struct xe_tile *tile;
-	ktime_t start = xe_svm_stats_ktime_get();
+	ktime_t start = xe_gt_stats_ktime_get();
	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
	u8 tile_mask = 0, id;
	long err;
				       unsigned long npages,
				       ktime_t start)
{
-	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+	s64 us_delta = xe_gt_stats_ktime_us_delta(start);

	if (dir == XE_SVM_COPY_TO_VRAM) {
		switch (npages) {
	u64 vram_addr = XE_VRAM_ADDR_INVALID;
	int err = 0, pos = 0;
	bool sram = dir == XE_SVM_COPY_TO_SRAM;
-	ktime_t start = xe_svm_stats_ktime_get();
+	ktime_t start = xe_gt_stats_ktime_get();
	/*
	 * This flow is complex: it locates physically contiguous device pages,
				     struct xe_svm_range *range, \
				     ktime_t start) \
{ \
-	s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
+	s64 us_delta = xe_gt_stats_ktime_us_delta(start); \
	\
	switch (xe_svm_range_size(range)) { \
	case SZ_4K: \
	struct drm_pagemap *dpagemap;
	struct xe_tile *tile = gt_to_tile(gt);
	int migrate_try_count = ctx.devmem_only ? 3 : 1;
-	ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
+	ktime_t start = xe_gt_stats_ktime_get(), bind_start, get_pages_start;
	int err;

	lockdep_assert_held_write(&vm->lock);
	if (--migrate_try_count >= 0 &&
	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
-		ktime_t migrate_start = xe_svm_stats_ktime_get();
+		ktime_t migrate_start = xe_gt_stats_ktime_get();

		/* TODO : For multi-device dpagemap will be used to find the
		 * remote tile and remote device. Will need to modify
	}

get_pages:
-	get_pages_start = xe_svm_stats_ktime_get();
+	get_pages_start = xe_gt_stats_ktime_get();
	range_debug(range, "GET PAGES");
	err = xe_svm_range_get_pages(vm, range, &ctx);
	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
	range_debug(range, "PAGE FAULT - BIND");
-	bind_start = xe_svm_stats_ktime_get();
+	bind_start = xe_gt_stats_ktime_get();
	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
		err = xe_vm_drm_exec_lock(vm, &exec);
		drm_exec_retry_on_contention(&exec);