static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
-	/* this might be platform dependent */
-	return SZ_2M;
+	/* alignment follows the LMTT leaf page size rather than a fixed 2M */
+	return xe_lmtt_page_size(&gt->tile->sriov.pf.lmtt);
}
static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
return BIT_ULL(lmtt->ops->lmtt_pte_shift(0));
}
+/**
+ * xe_lmtt_page_size() - Get LMTT page size.
+ * @lmtt: the &xe_lmtt
+ *
+ * This function shall be called only by PF, on devices with LMTT
+ * support (both conditions are enforced by the asserts below).
+ *
+ * Return: LMTT page size, in bytes.
+ */
+u64 xe_lmtt_page_size(struct xe_lmtt *lmtt)
+{
+	lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
+	lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
+	lmtt_assert(lmtt, lmtt->ops);
+
+	return lmtt_page_size(lmtt);
+}
+
+
static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level)
{
unsigned int num_entries = level ? lmtt->ops->lmtt_pte_num(level) : 0;
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size);
+u64 xe_lmtt_page_size(struct xe_lmtt *lmtt);
#else
static inline int xe_lmtt_init(struct xe_lmtt *lmtt) { return 0; }
static inline void xe_lmtt_init_hw(struct xe_lmtt *lmtt) { }