xe_lmtt.o \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
+ xe_mert.o \
xe_pci_sriov.o \
xe_sriov_packet.o \
xe_sriov_pf.o \
#define GU_MISC_IRQ REG_BIT(29)
#define ERROR_IRQ(x) REG_BIT(26 + (x))
#define DISPLAY_IRQ REG_BIT(16)
+#define SOC_H2DMEMINT_IRQ REG_BIT(13)
#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
#define MERT_LMEM_CFG XE_REG(0x1448b0)
+#define MERT_TLB_INV_DESC_A XE_REG(0x14cf7c)
+#define MERT_TLB_INV_DESC_A_VALID REG_BIT(0)
+
#endif /* _XE_MERT_REGS_H_ */
#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
+#include "xe_mert.h"
#include "xe_oa_types.h"
#include "xe_pagefault_types.h"
#include "xe_platform_types.h"
/** @debugfs: debugfs directory associated with this tile */
struct dentry *debugfs;
+
+ /** @mert: MERT-related data */
+ struct xe_mert mert;
};
/**
#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
+#include "xe_mert.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
xe_i2c_irq_handler(xe, master_ctl);
+ xe_mert_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
}
#include "xe_tlb_inval.h"
#include "xe_lmtt.h"
#include "xe_map.h"
+#include "xe_mert.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
* @lmtt: the &xe_lmtt to invalidate
*
* Send requests to all GuCs on this tile to invalidate all TLBs.
+ * If the platform has a standalone MERT, also invalidate MERT's TLB.
*
* This function should be called only when running as a PF driver.
*/
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
+ struct xe_tile *tile = lmtt_to_tile(lmtt);
+ struct xe_device *xe = lmtt_to_xe(lmtt);
int err;
- lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
+ lmtt_assert(lmtt, IS_SRIOV_PF(xe));
err = lmtt_invalidate_hw(lmtt);
if (err)
- xe_tile_sriov_err(lmtt_to_tile(lmtt), "LMTT invalidation failed (%pe)",
+ xe_tile_sriov_err(tile, "LMTT invalidation failed (%pe)",
ERR_PTR(err));
+
+ /*
+ * Errors are reported but not propagated: the MERT invalidation below
+ * is still attempted even if the GuC-based invalidation above failed.
+ * The MERT TLB is only invalidated from the root tile.
+ */
+ if (xe_device_has_mert(xe) && xe_tile_is_root(tile)) {
+ err = xe_mert_invalidate_lmtt(tile);
+ if (err)
+ xe_tile_sriov_err(tile, "MERT LMTT invalidation failed (%pe)",
+ ERR_PTR(err));
+ }
}
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
--- /dev/null
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2025, Intel Corporation. All rights reserved.
+ */
+
+#include "regs/xe_irq_regs.h"
+#include "regs/xe_mert_regs.h"
+
+#include "xe_device.h"
+#include "xe_mert.h"
+#include "xe_mmio.h"
+#include "xe_tile.h"
+
+/**
+ * xe_mert_invalidate_lmtt - Invalidate MERT LMTT
+ * @tile: the &xe_tile (must be the root tile of a device with a MERT)
+ *
+ * Trigger invalidation of the MERT LMTT and wait for completion.
+ * Concurrent callers coalesce onto a single pending invalidation: only the
+ * first caller writes the descriptor register, later callers wait on the
+ * same completion, which is signaled from xe_mert_irq_handler().
+ *
+ * Return: 0 on success or -ETIMEDOUT in case of a timeout.
+ */
+int xe_mert_invalidate_lmtt(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_mert *mert = &tile->mert;
+ const long timeout = HZ / 4; /* 250 ms budget for the HW to ack */
+ unsigned long flags;
+
+ xe_assert(xe, xe_device_has_mert(xe));
+ xe_assert(xe, xe_tile_is_root(tile));
+
+ /* Only trigger when no invalidation is already in flight */
+ spin_lock_irqsave(&mert->lock, flags);
+ if (!mert->tlb_inv_triggered) {
+ mert->tlb_inv_triggered = true;
+ /* Re-arm the completion before the IRQ handler can complete it */
+ reinit_completion(&mert->tlb_inv_done);
+ xe_mmio_write32(&tile->mmio, MERT_TLB_INV_DESC_A, MERT_TLB_INV_DESC_A_VALID);
+ }
+ spin_unlock_irqrestore(&mert->lock, flags);
+
+ /*
+ * NOTE(review): on timeout, tlb_inv_triggered remains set, so a later
+ * call will not re-write the descriptor until the IRQ handler finally
+ * observes the VALID bit cleared — confirm this is the intended
+ * recovery behavior.
+ */
+ if (!wait_for_completion_timeout(&mert->tlb_inv_done, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * xe_mert_irq_handler - Handler for MERT interrupts
+ * @xe: the &xe_device
+ * @master_ctl: master interrupt control register value
+ *
+ * Handle interrupts generated by MERT. If a TLB invalidation is pending
+ * and the HW has cleared the descriptor VALID bit, wake up all waiters
+ * in xe_mert_invalidate_lmtt().
+ */
+void xe_mert_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ unsigned long flags;
+ u32 reg_val;
+
+ /* MERT interrupts are reported via the SOC H2D memory interrupt bit */
+ if (!(master_ctl & SOC_H2DMEMINT_IRQ))
+ return;
+
+ spin_lock_irqsave(&tile->mert.lock, flags);
+ if (tile->mert.tlb_inv_triggered) {
+ /* HW clears the VALID bit once the invalidation has completed */
+ reg_val = xe_mmio_read32(&tile->mmio, MERT_TLB_INV_DESC_A);
+ if (!(reg_val & MERT_TLB_INV_DESC_A_VALID)) {
+ tile->mert.tlb_inv_triggered = false;
+ complete_all(&tile->mert.tlb_inv_done);
+ }
+ }
+ spin_unlock_irqrestore(&tile->mert.lock, flags);
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef _XE_MERT_H_
+#define _XE_MERT_H_
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct xe_device;
+struct xe_tile;
+
+/**
+ * struct xe_mert - MERT-related data
+ *
+ * Tracks the state of a pending MERT TLB invalidation on the root tile.
+ */
+struct xe_mert {
+ /** @lock: protects the TLB invalidation status */
+ spinlock_t lock;
+ /** @tlb_inv_triggered: indicates if TLB invalidation was triggered */
+ bool tlb_inv_triggered;
+ /** @tlb_inv_done: completion of TLB invalidation */
+ struct completion tlb_inv_done;
+};
+
+#ifdef CONFIG_PCI_IOV
+int xe_mert_invalidate_lmtt(struct xe_tile *tile);
+void xe_mert_irq_handler(struct xe_device *xe, u32 master_ctl);
+#else
+static inline void xe_mert_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+#endif
+
+#endif /* _XE_MERT_H_ */
*/
int xe_sriov_pf_init_early(struct xe_device *xe)
{
+ struct xe_mert *mert = &xe_device_get_root_tile(xe)->mert;
int err;
xe_assert(xe, IS_SRIOV_PF(xe));
xe_sriov_pf_service_init(xe);
+ /*
+ * NOTE(review): the MERT sync primitives are initialized here
+ * unconditionally, even on devices without a MERT — harmless, but
+ * consider moving this into a dedicated xe_mert init helper. They
+ * must be ready before the first PF LMTT invalidation.
+ */
+ spin_lock_init(&mert->lock);
+ init_completion(&mert->tlb_inv_done);
+
return 0;
}