		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
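+/*
+ * Enable or disable the firmware's handler for the given event on the given
+ * DMC: the matching DMC_EVT_CTL register is written with either the value
+ * provided by the firmware (enable) or the "event disabled" value.
+ */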
+static void dmc_configure_event(struct intel_display *display,
+				enum intel_dmc_id dmc_id,
+				unsigned int event_id,
+				bool enable)
+{
+	struct intel_dmc *dmc = display_to_dmc(display);
+	int num_handlers = 0;
+	int i;
+
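+	/* find the event's handler(s) in this DMC's mmio programming table */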
+	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+		i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+		u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
+
+		if (!is_event_handler(display, dmc_id, event_id, reg, data))
+			continue;
+
+		intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+		num_handlers++;
+	}
+
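+	/* each event is expected to have exactly one handler */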
+	drm_WARN_ONCE(display->drm, num_handlers != 1,
+		      "DMC %d has %d handlers for event 0x%x\n",
+		      dmc_id, num_handlers, event_id);
+}
+
/**
* intel_dmc_block_pkgc() - block PKG C-state
* @display: display instance
void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
							    enum pipe pipe, bool enable)
{
-	u32 val;
-
-	if (enable)
-		val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
-			REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
-				       DMC_EVT_CTL_TYPE_EDGE_0_1) |
-			REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
-				       PIPEDMC_EVENT_VBLANK);
-	else
-		val = dmc_evt_ctl_disable();
+	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

-	intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe),
-		       val);
+	dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
}
static bool disable_dmc_evt(struct intel_display *display,
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
-#define _MTL_PIPEDMC_EVT_CTL_4_A	0x5f044
-#define _MTL_PIPEDMC_EVT_CTL_4_B	0x5f444
-#define MTL_PIPEDMC_EVT_CTL_4(pipe)	_MMIO_PIPE(pipe, \
-						   _MTL_PIPEDMC_EVT_CTL_4_A, \
-						   _MTL_PIPEDMC_EVT_CTL_4_B)
-
#define _PIPEDMC_STATUS_A 0x5f06c
#define _PIPEDMC_STATUS_B 0x5f46c
#define PIPEDMC_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_STATUS_A, _PIPEDMC_STATUS_B)