const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int max_bw = 0;
- int slice_id;
enum pipe pipe;
int i;
const struct skl_ddb_entry *plane_alloc =
	&crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_ddb_entry *uv_plane_alloc =
	&crtc_state->wm.skl.plane_ddb_uv[plane_id];
unsigned int data_rate = crtc_state->data_rate[plane_id];
unsigned int dbuf_mask = 0;
+ enum dbuf_slice slice;
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
/*
 * In the worst case these calculations are a bit
 * pessimistic, which shouldn't pose any significant
 * problem anyway.
 */
- for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
- crtc_bw->used_bw[slice_id] += data_rate;
+ for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
+ crtc_bw->used_bw[slice] += data_rate;
}
}
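The dbuf_mask built above comes from skl_ddb_dbuf_slice_mask(), which maps a plane's DDB allocation to the slices its block range touches. A minimal sketch of that idea, with a hypothetical helper name and the assumption that each slice covers an equal, contiguous chunk of the DDB (this is not the driver's implementation):

	static u32 ddb_entry_slices(const struct skl_ddb_entry *entry,
				    int slice_size, int num_slices)
	{
		u32 mask = 0;
		int slice;

		for (slice = 0; slice < num_slices; slice++) {
			int start = slice * slice_size;
			int end = start + slice_size;

			/* charge a slice if the entry overlaps its block range */
			if (entry->start < end && entry->end > start)
				mask |= BIT(slice);
		}

		return mask;
	}

Because a plane's y and uv allocations are charged separately, a plane straddling two slices gets its full data_rate counted against both, which is the pessimism the comment above refers to.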
for_each_pipe(dev_priv, pipe) {
struct intel_dbuf_bw *crtc_bw;
+ enum dbuf_slice slice;
crtc_bw = &new_bw_state->dbuf_bw[pipe];
- for_each_dbuf_slice(slice_id) {
+ for_each_dbuf_slice(dev_priv, slice) {
/*
 * Current experimental observations show that contrary
 * to BSpec we get underruns once we exceed 64 * CDCLK
 * for slices in total.
 * As a temporary measure, in order not to keep CDCLK
 * bumped up all the time, we calculate CDCLK according
 * to this formula for the overall bw consumed by slices.
 */
- max_bw += crtc_bw->used_bw[slice_id];
+ max_bw += crtc_bw->used_bw[slice];
}
}
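With the per-slice sums in hand, the 64 * CDCLK ceiling from the comment above turns directly into a CDCLK floor. A one-line sketch, assuming the bw state carries a min_cdclk field as in the surrounding code:

	/* underruns appear past 64 * CDCLK total, so require CDCLK >= bw / 64 */
	new_bw_state->min_cdclk = max_bw / 64;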
#define for_each_plane_id_on_crtc(__crtc, __p) \
	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
		for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-#define for_each_dbuf_slice_in_mask(__slice, __mask) \
+#define for_each_dbuf_slice(__dev_priv, __slice) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
- for_each_if((BIT(__slice)) & (__mask))
+ for_each_if(INTEL_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
-#define for_each_dbuf_slice(__slice) \
- for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
+#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
+ for_each_dbuf_slice((__dev_priv), (__slice)) \
+ for_each_if((__mask) & BIT(__slice))
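For reference, a short sketch of how the reworked iterators read at a call site; dbuf_mask here is an arbitrary caller-chosen mask and the debug prints are illustrative only:

	enum dbuf_slice slice;
	u8 dbuf_mask = BIT(DBUF_S1);

	/* visit every slice the platform actually has */
	for_each_dbuf_slice(dev_priv, slice)
		drm_dbg_kms(&dev_priv->drm, "slice %d present\n", slice);

	/* visit only the platform slices also set in the mask */
	for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
		drm_dbg_kms(&dev_priv->drm, "slice %d selected\n", slice);

Nesting for_each_dbuf_slice_in_mask() on top of for_each_dbuf_slice() means a mask bit for a slice the platform lacks is silently skipped rather than iterated.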
enum port {
PORT_NONE = -1,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- int num_slices = intel_dbuf_num_slices(dev_priv);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
- "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
- req_slices, num_slices);
+ drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
+ req_slices, slice_mask);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/*
 * Might be running this in parallel to gen9_dc_off_power_well_enable
 * being disabled. Make sure no locks are taken in between.
 */
mutex_lock(&power_domains->lock);
- for (slice = DBUF_S1; slice < num_slices; slice++)
+ for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
dev_priv->dbuf.enabled_slices = req_slices;
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
- int num_slices = intel_dbuf_num_slices(dev_priv);
enum dbuf_slice slice;
- for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++)
+ for_each_dbuf_slice(dev_priv, slice)
intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
- int i;
- int num_slices = intel_dbuf_num_slices(dev_priv);
- u8 enabled_slices_mask = 0;
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
- for (i = 0; i < num_slices; i++) {
- if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
- enabled_slices_mask |= BIT(i);
+ for_each_dbuf_slice(dev_priv, slice) {
+ if (intel_uncore_read(&dev_priv->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
}
- return enabled_slices_mask;
+ return enabled_slices;
}
return 0;
}
-static int intel_dbuf_size(struct drm_i915_private *dev_priv)
-{
- return INTEL_INFO(dev_priv)->dbuf.size;
-}
-
-int intel_dbuf_num_slices(struct drm_i915_private *dev_priv)
-{
- return hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
-}
-
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- return intel_dbuf_size(dev_priv) /
- intel_dbuf_num_slices(dev_priv);
+ return INTEL_INFO(dev_priv)->dbuf.size /
+ hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
}
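As a worked example, on a hypothetical platform whose device info reports dbuf.size = 2048 blocks and slice_mask = 0x3 (two slices), intel_dbuf_slice_size() returns 2048 / hweight8(0x3) = 1024 blocks per slice.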
static void
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
+ WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
}
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
return ret;
drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x)\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- intel_dbuf_num_slices(dev_priv));
+ INTEL_INFO(dev_priv)->dbuf.slice_mask);
}
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-int intel_dbuf_num_slices(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);