intel_de_posting_read(display, DSPFW3(display));
} else if (display->platform.i945g || display->platform.i945gm) {
was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(FW_BLC_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(FW_BLC_SELF_EN);
intel_de_write(display, FW_BLC_SELF, val);
intel_de_posting_read(display, FW_BLC_SELF);
} else if (display->platform.i915gm) {
* FW_BLC_SELF. What's going on?
*/
was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(INSTPM_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(INSTPM_SELF_EN);
intel_de_write(display, INSTPM, val);
intel_de_posting_read(display, INSTPM);
} else {
*/
if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
intel_uncore_write(uncore,
GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybridge/Gen6 and later */
engine->class == VIDEO_ENHANCEMENT_CLASS ||
engine->class == COMPUTE_CLASS ||
engine->class == OTHER_CLASS))
- engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ engine->tlb_inv.request = REG_MASKED_FIELD_ENABLE(val);
else
engine->tlb_inv.request = val;
const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, REG_MASKED_FIELD_ENABLE(STOP_RING));
/*
* Wa_22011802037: Prior to doing a reset, ensure CS is
*/
if (intel_engine_reset_needs_wa_22011802037(engine->gt))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
- _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
{
ENGINE_TRACE(engine, "\n");
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
}
static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
return;
intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
- _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
intel_uncore_write(engine->gt->uncore,
RC_PSMI_CTRL_GSCCS,
- _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(IDLE_MSG_DISABLE));
/* hysteresis 0xA=5us as recommended in spec */
intel_uncore_write(engine->gt->uncore,
PWRCTX_MAXCNT_GSCCS,
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (GRAPHICS_VER(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ mode = REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ mode = REG_MASKED_FIELD_ENABLE(GFX_RUN_LIST_ENABLE);
ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_WRITE_FW(engine,
RING_HWS_PGA,
if (GRAPHICS_VER(i915) == 6)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (GRAPHICS_VER(i915) == 7)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (GRAPHICS_VER(i915) == 8)
intel_uncore_write(uncore,
GAMTARBMODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
MISSING_CASE(GRAPHICS_VER(i915));
}
u32 ctl;
int loc;
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ ctl = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (inhibit)
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (GRAPHICS_VER(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
/* Wa_14019159160 - Case 2. */
if (ctx_needs_runalone(ce))
- ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
+ ctl |= REG_MASKED_FIELD_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN12_CS_DEBUG_MODE2);
- *cs++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cs;
}
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
{
COMMON_SLICE_CHICKEN2,
- _MASKED_BIT_DISABLE(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE),
+ REG_MASKED_FIELD_DISABLE(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE),
},
/* BSpec: 11391 */
{
FF_SLICE_CHICKEN,
- _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
},
/* BSpec: 11299 */
{
_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
}
};
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
rc6->ctl_enable =
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
* set the high bit to be safe.
*/
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
do {
tmp = upper;
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_DISABLE(VLV_COUNT_RANGE_HIGH));
lower = intel_uncore_read_fw(uncore, reg);
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
} while (upper != tmp && --loop);
return 0;
}
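/*
 * Aside (not part of the diff): the hi/lo/hi sequence above is the
 * usual way to sample a free-running counter wider than one register
 * read -- re-read the high half until it is stable so that a carry
 * between the two reads cannot tear the result.
 */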
- intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ intel_uncore_write_fw(uncore, reg, REG_MASKED_FIELD_ENABLE(request));
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
{
intel_uncore_write_fw(engine->uncore,
RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
+ REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH));
if (__intel_wait_for_register_fw(engine->uncore,
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
if (GRAPHICS_VER(engine->i915) >= 7) {
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
}
if (GRAPHICS_VER(engine->i915) > 2) {
ENGINE_WRITE_FW(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
} else if (GRAPHICS_VER(i915) == 5) {
last_reg = RING_PSMI_CTL(signaller->mmio_base);
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = _MASKED_BIT_DISABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
* will then assume that it is busy and bring it out of rc6.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
* and so let it sleep to conserve power when idle.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
u32 mask, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
u32 mask, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaEnableFloatBlendOptimization:icl */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
+ REG_MASKED_FIELD_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
0 /* write-only, so skip validation */,
true);
wa_add(wal,
HSW_ROW_CHICKEN3, 0,
- _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ REG_MASKED_FIELD_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
0 /* XXX does this reg exist? */, true);
/* WaVSRefCountFullforceMissDisable:hsw */
IS_DG2(i915)) {
/* Wa_14015150844 */
wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
- _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
+ REG_MASKED_FIELD_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
0, true);
}
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ 0, REG_MASKED_FIELD_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
* enabled.
*/
wa_add(wal, ECOSKPD(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0, REG_MASKED_FIELD_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
0 /* XXX bit doesn't stick on Broadwater */,
true);
}
* we need to explicitly skip the readback.
*/
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ REG_MASKED_FIELD_ENABLE(ENABLE_PREFETCH_INTO_IC),
0 /* write-only, so skip validation */,
true);
}
{
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
/* Start the DMA */
intel_uncore_write_fw(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
- intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ intel_uncore_write_fw(uncore, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
bool enable_execlist;
int ret;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(1);
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
IS_COMETLAKE(vgpu->gvt->gt->i915))
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 1)) {
if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
- else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ else if (data & REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
vgpu_vreg(vgpu, offset) = data;
{
u32 data = *(u32 *)p_data;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
- ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ~REG_MASKED_FIELD_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
{
const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
return inhibit_mask ==
(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define IS_MASKED_BITS_ENABLED(_val, _b) \
- (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+ (((_val) & REG_MASKED_FIELD_ENABLE(_b)) == REG_MASKED_FIELD_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
- ((_val) & _MASKED_BIT_DISABLE(_b))
+ ((_val) & REG_MASKED_FIELD_DISABLE(_b))
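/*
 * Worked example (illustrative, for a single-bit _b): an enable write,
 * REG_MASKED_FIELD_ENABLE(_b), sets both the mask half and the value
 * half, so IS_MASKED_BITS_ENABLED() matches.  A disable write,
 * REG_MASKED_FIELD_DISABLE(_b), sets only the mask half;
 * IS_MASKED_BITS_DISABLED() tests just that half, so callers check it
 * only after IS_MASKED_BITS_ENABLED() has been ruled out.
 */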
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
- _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- active ?
- GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
- 0)
+ REG_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ active ?
+ GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
+ 0)
},
};
*/
if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
intel_uncore_write(uncore, GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ REG_MASKED_FIELD_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
- return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- (stream->sample_flags & SAMPLE_OA_REPORT) ?
- 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ return REG_MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ (stream->sample_flags & SAMPLE_OA_REPORT) ?
+ 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static int
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(GEN12_DISABLE_DOP_GATING));
}
intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
/* Disable clk ratio reports, like previous Gens. */
- _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ REG_MASKED_FIELD_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
/*
* If the user didn't require OA reports, instruct
* the hardware not to emit ctx switch reports.
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(GEN12_DISABLE_DOP_GATING));
}
/* disable the context save/restore or OAR counters */
* programmed by userspace doesn't change this.
*/
if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
- val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(GEN8_ST_PO_DISABLE);
/*
* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* configs.
*/
if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
- val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-#define _MASKED_FIELD(mask, value) \
+#define REG_MASKED_FIELD(mask, value) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask), (mask) & 0xffff0000, 0)) + \
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(value), (value) & 0xffff0000, 0)) + \
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask) && __builtin_constant_p(value), (value) & ~(mask), 0)) + \
((mask) << 16 | (value)))
-#define _MASKED_BIT_ENABLE(a) \
- (__builtin_choose_expr(__builtin_constant_p(a), _MASKED_FIELD((a), (a)), ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })))
+#define REG_MASKED_FIELD_ENABLE(a) \
+ (__builtin_choose_expr(__builtin_constant_p(a), REG_MASKED_FIELD((a), (a)), ({ typeof(a) _a = (a); REG_MASKED_FIELD(_a, _a); })))
-#define _MASKED_BIT_DISABLE(a) \
- (_MASKED_FIELD((a), 0))
+#define REG_MASKED_FIELD_DISABLE(a) \
+ (REG_MASKED_FIELD((a), 0))
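/*
 * Illustrative aside (not part of this change): masked registers carry
 * a write-enable mask in bits 31:16 and the payload in bits 15:0, so a
 * single write updates selected bits without a read-modify-write cycle.
 * For a hypothetical control bit BIT(2) the renamed macros expand to:
 *
 *	REG_MASKED_FIELD(0x4, 0x4)    == 0x00040004
 *	REG_MASKED_FIELD_ENABLE(0x4)  == 0x00040004	(set the bit)
 *	REG_MASKED_FIELD_DISABLE(0x4) == 0x00040000	(clear the bit)
 *
 * Bits whose mask half is 0 are left untouched by the hardware.
 */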
/*
* Given the first two numbers __a and __b of arbitrarily many evenly spaced
GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0);
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
if (INTEL_INFO(i915)->gt == 1)
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2_GT2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
/*
/* WaDisableDopClockGating:vlv */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
/* WaDisableSemaphoreAndSyncFlipWait:chv */
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *i915)
I965_FBC_CLOCK_GATE_DISABLE);
intel_uncore_write(&i915->uncore, RENCLK_GATE_D2, 0);
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *i915)
if (IS_PINEVIEW(i915))
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ REG_MASKED_FIELD_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ REG_MASKED_FIELD_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&i915->uncore, INSTPM, REG_MASKED_FIELD_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
- _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+ intel_uncore_write(&i915->uncore, MI_STATE, REG_MASKED_FIELD_ENABLE(MI_AGPBUSY_INT_EN) |
+ REG_MASKED_FIELD_DISABLE(MI_AGPBUSY_830_MODE));
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
* Have FBC ignore 3D activity since we use software
* until a 2D blit occurs.
*/
intel_uncore_write(&i915->uncore, SCPD0,
- _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
+ REG_MASKED_FIELD_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *i915)
{
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
- _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_clock_gating_init(struct drm_device *drm)
}
#define fw_ack(d) readl((d)->reg_ack)
-#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
-#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+#define fw_set(d, val) writel(REG_MASKED_FIELD_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(REG_MASKED_FIELD_DISABLE((val)), (d)->reg_set)
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}
* On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
*/
if (GRAPHICS_VER(xe) >= 20)
- write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
else
- write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
}
/* Read pointer can overflow into one additional bit */
read_ptr &= (buf_size << 1) - 1;
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
xecore_buf->read = read_ptr;
trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
for_each_dss_steering(xecore, gt, group, instance) {
write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
clear_dropped_eviction_line_bit(gt, group, instance);
write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
/* Initialize the read pointer to the write pointer */
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
write_ptr <<= 6;
stream->data_drop.reported_to_user = false;
bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
- reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
- REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
- REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
- stream->sampling_rate_mult));
+ reg_value = REG_MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
/* GGTT addresses can never be > 32 bits */
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
xe_mmio_write32(mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
lrc->ring.old_tail = lrc->ring.tail;
xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
- _MASKED_BIT_DISABLE(STOP_RING));
+ REG_MASKED_FIELD_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ regs[CTX_CONTEXT_CONTROL] = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
*cmd++ = CS_DEBUG_MODE2(0).addr;
- *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cmd++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cmd - batch;
}
if (init_flags & XE_LRC_CREATE_RUNALONE)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_RUN_ALONE));
if (init_flags & XE_LRC_CREATE_PXP)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_PXP_ENABLE));
lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+ REG_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
- _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
+ REG_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
+ REG_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool enable)
{
- return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_MMIO_TRG,
- enable && stream && stream->sample ?
- 0 : OAG_OA_DEBUG_DISABLE_MMIO_TRG);
+ return REG_MASKED_FIELD(OAG_OA_DEBUG_DISABLE_MMIO_TRG,
+ enable && stream && stream->sample ?
+ 0 : OAG_OA_DEBUG_DISABLE_MMIO_TRG);
}
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Enable thread stall DOP gating and EU DOP gating. */
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
}
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
{
/* If user didn't require OA reports, ask HW not to emit ctx switch reports */
- return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- stream->sample ?
- 0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ return REG_MASKED_FIELD(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ stream->sample ?
+ 0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
- return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
- OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+ return REG_MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
+ xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
+ OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
}
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
}
/* Disable clk ratio reports */
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
- _MASKED_BIT_ENABLE(oa_debug) |
+ REG_MASKED_FIELD_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
/* Start the DMA */
xe_mmio_write32(mmio, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
- xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ xe_mmio_write32(mmio, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
return ret;
}