perf/x86/intel/uncore: Support IIO free-running counters on DMR
author    Zide Chen <zide.chen@intel.com>
          Wed, 31 Dec 2025 22:42:25 +0000 (14:42 -0800)
committer Peter Zijlstra <peterz@infradead.org>
          Tue, 6 Jan 2026 15:34:25 +0000 (16:34 +0100)
The free-running counters for the IIO uncore blocks on Diamond Rapids are
similar to the Sapphire Rapids IMC free-running counters, with the following
differences:

- The counters are MMIO based.
- Only a subset of IP blocks implement free-running counters:
  HIOP0 (IP Base Addr: 2E7000h)
  HIOP1 (IP Base Addr: 2EF000h)
  HIOP3 (IP Base Addr: 2FF000h)
  HIOP4 (IP Base Addr: 307000h)
- IMH2 (Secondary IMH) does not provide free-running counters.

Signed-off-by: Zide Chen <zide.chen@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://patch.msgid.link/20251231224233.113839-9-zide.chen@intel.com
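
The per-PMU MMIO bases used by this patch follow directly from the HIOP IP base
addresses listed above: HIOP1/3/4 sit 0x8000, 0x18000 and 0x20000 past HIOP0,
which is what dmr_iio_freerunning_box_offsets[] in the diff encodes. Below is a
minimal stand-alone sketch of that mapping (not part of the patch; it assumes
pmu_idx enumerates the four HIOPs in the order listed above):

    /*
     * Illustrative only: reproduce the base-plus-offset computation that
     * dmr_uncore_freerunning_init_box() performs for each HIOP PMU.
     */
    #include <stdio.h>

    #define DMR_IMH1_HIOP_MMIO_BASE 0x1ffff6ae7000ULL

    /* Same values as dmr_iio_freerunning_box_offsets[] in the patch. */
    static const unsigned int hiop_offsets[] = { 0x0, 0x8000, 0x18000, 0x20000 };
    static const char *const hiop_names[]    = { "HIOP0", "HIOP1", "HIOP3", "HIOP4" };

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("pmu_idx %d -> %s @ %#llx\n", i, hiop_names[i],
                           DMR_IMH1_HIOP_MMIO_BASE + hiop_offsets[i]);
            return 0;
    }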
arch/x86/events/intel/uncore_snbep.c

index cfb4ce325bb6355b2acf7a3c9a881806813491b9..cc8145e7030e3cedf81e110e5695e04a256770bc 100644 (file)
 #define SPR_C0_MSR_PMON_BOX_FILTER0            0x200e
 
 /* DMR */
+#define DMR_IMH1_HIOP_MMIO_BASE                        0x1ffff6ae7000
+#define DMR_HIOP_MMIO_SIZE                     0x8000
 #define DMR_CXLCM_EVENT_MASK_EXT               0xf
 #define DMR_HAMVF_EVENT_MASK_EXT               0xffffffff
 #define DMR_PCIE4_EVENT_MASK_EXT               0xffffff
 
+#define UNCORE_DMR_ITC                         0x30
+
 #define DMR_IMC_PMON_FIXED_CTR                 0x18
 #define DMR_IMC_PMON_FIXED_CTL                 0x10
 
@@ -6442,7 +6446,11 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types,
        for (node = rb_first(type->boxes); node; node = rb_next(node)) {
                unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
 
-               if (unit->id > max)
+               /*
+                * On DMR, the IMH2 unit IDs start from 0x8000 and should
+                * not be counted here.
+                */
+               if ((unit->id > max) && (unit->id < 0x8000))
                        max = unit->id;
        }
        return max + 1;
@@ -6930,6 +6938,101 @@ int dmr_uncore_cbb_units_ignore[] = {
        UNCORE_IGNORE_END
 };
 
+static unsigned int dmr_iio_freerunning_box_offsets[] = {
+       0x0, 0x8000, 0x18000, 0x20000
+};
+
+static void dmr_uncore_freerunning_init_box(struct intel_uncore_box *box)
+{
+       struct intel_uncore_type *type = box->pmu->type;
+       u64 mmio_base;
+
+       if (box->pmu->pmu_idx >= type->num_boxes)
+               return;
+
+       mmio_base = DMR_IMH1_HIOP_MMIO_BASE;
+       mmio_base += dmr_iio_freerunning_box_offsets[box->pmu->pmu_idx];
+
+       box->io_addr = ioremap(mmio_base, type->mmio_map_size);
+       if (!box->io_addr)
+               pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
+}
+
+static struct intel_uncore_ops dmr_uncore_freerunning_ops = {
+       .init_box       = dmr_uncore_freerunning_init_box,
+       .exit_box       = uncore_mmio_exit_box,
+       .read_counter   = uncore_mmio_read_counter,
+       .hw_config      = uncore_freerunning_hw_config,
+};
+
+enum perf_uncore_dmr_iio_freerunning_type_id {
+       DMR_ITC_INB_DATA_BW,
+       DMR_ITC_BW_IN,
+       DMR_OTC_BW_OUT,
+       DMR_OTC_CLOCK_TICKS,
+
+       DMR_IIO_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters dmr_iio_freerunning[] = {
+       [DMR_ITC_INB_DATA_BW]   = { 0x4d40, 0x8, 0, 8, 48},
+       [DMR_ITC_BW_IN]         = { 0x6b00, 0x8, 0, 8, 48},
+       [DMR_OTC_BW_OUT]        = { 0x6b60, 0x8, 0, 8, 48},
+       [DMR_OTC_CLOCK_TICKS]   = { 0x6bb0, 0x8, 0, 1, 48},
+};
+
+static struct uncore_event_desc dmr_uncore_iio_freerunning_events[] = {
+       /*  ITC Free Running Data BW counter for inbound traffic */
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port0, 0x10, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port1, 0x11, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port2, 0x12, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port3, 0x13, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port4, 0x14, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port5, 0x15, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port6, 0x16, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(inb_data_port7, 0x17, "3.814697266e-6"),
+
+       /*  ITC Free Running BW IN counters */
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0, 0x20, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1, 0x21, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2, 0x22, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3, 0x23, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port4, 0x24, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port5, 0x25, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port6, 0x26, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_in_port7, 0x27, "3.814697266e-6"),
+
+       /*  ITC Free Running BW OUT counters */
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port0, 0x30, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port1, 0x31, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port2, 0x32, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port3, 0x33, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port4, 0x34, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port5, 0x35, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port6, 0x36, "3.814697266e-6"),
+       INTEL_UNCORE_FR_EVENT_DESC(bw_out_port7, 0x37, "3.814697266e-6"),
+
+       /* Free Running Clock Counter */
+       INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x40"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type dmr_uncore_iio_free_running = {
+       .name                   = "iio_free_running",
+       .num_counters           = 25,
+       .mmio_map_size          = DMR_HIOP_MMIO_SIZE,
+       .num_freerunning_types  = DMR_IIO_FREERUNNING_TYPE_MAX,
+       .freerunning            = dmr_iio_freerunning,
+       .ops                    = &dmr_uncore_freerunning_ops,
+       .event_descs            = dmr_uncore_iio_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+#define UNCORE_DMR_MMIO_EXTRA_UNCORES          1
+static struct intel_uncore_type *dmr_mmio_uncores[UNCORE_DMR_MMIO_EXTRA_UNCORES] = {
+       &dmr_uncore_iio_free_running,
+};
+
 int dmr_uncore_pci_init(void)
 {
        uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
@@ -6937,11 +7040,16 @@ int dmr_uncore_pci_init(void)
                                                dmr_uncores);
        return 0;
 }
+
 void dmr_uncore_mmio_init(void)
 {
-       uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
-                                                UNCORE_DMR_NUM_UNCORE_TYPES,
-                                                dmr_uncores);
-}
+       uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
+                                               UNCORE_DMR_MMIO_EXTRA_UNCORES,
+                                               dmr_mmio_uncores,
+                                               UNCORE_DMR_NUM_UNCORE_TYPES,
+                                               dmr_uncores);
 
+       dmr_uncore_iio_free_running.num_boxes =
+               uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_DMR_ITC);
+}
 /* end of DMR uncore support */
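
A note outside the patch itself: given the dmr_iio_freerunning[] table above
(counter base, stride between counters, box offset, counter count, width), the
offset of an individual free-running counter inside a mapped HIOP window is
simply base + index * stride, with the type and index taken from the event's
umask (type nibble starting at 0x10, counter index in the low nibble). The
stand-alone sketch below illustrates that arithmetic; the umask encoding and
the base-plus-stride computation mirror the kernel's generic uncore
free-running helpers and should be treated as assumptions here, and the
box_offset field (0 in this table) is left out because box placement is
already handled by dmr_uncore_freerunning_init_box().

    #include <stdio.h>

    /* Counter-type geometry copied from dmr_iio_freerunning[] in the diff. */
    struct fr_counter {
            unsigned int base;      /* offset of counter 0 in the HIOP window */
            unsigned int stride;    /* distance between consecutive counters  */
            unsigned int num;       /* number of counters of this type        */
            unsigned int bits;      /* counter width in bits                  */
    };

    static const struct fr_counter dmr_iio_fr[] = {
            { 0x4d40, 0x8, 8, 48 }, /* inb_data_port0..7, umask 0x10..0x17 */
            { 0x6b00, 0x8, 8, 48 }, /* bw_in_port0..7,    umask 0x20..0x27 */
            { 0x6b60, 0x8, 8, 48 }, /* bw_out_port0..7,   umask 0x30..0x37 */
            { 0x6bb0, 0x8, 1, 48 }, /* clockticks,        umask 0x40       */
    };

    int main(void)
    {
            unsigned int umask = 0x23;              /* e.g. bw_in_port3 */
            unsigned int type  = (umask >> 4) - 1;  /* types start at umask 0x10 */
            unsigned int idx   = umask & 0xf;

            printf("window offset: %#x\n",
                   dmr_iio_fr[type].base + idx * dmr_iio_fr[type].stride);
            return 0;
    }

Assuming the PMUs end up exposed as uncore_iio_free_running_<N> in sysfs, the
counters would then be usable through the event names listed in
dmr_uncore_iio_freerunning_events[], e.g. the bw_in_port* and clockticks events.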