struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
- return 0;
+ return NULL;
mutex_lock(&power_domains->lock);
if (!is_enabled) {
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- wakeref = 0;
+ wakeref = NULL;
}
return wakeref;
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = 0;
+ intel_wakeref_t old_work_wakeref = NULL;
mutex_lock(&power_domains->lock);
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
#define with_intel_display_power_if_enabled(i915, domain, wf) \
for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
#endif /* __INTEL_DISPLAY_POWER_H__ */
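For reference, a minimal usage sketch of the with_intel_display_power() wrapper above (illustrative only, not part of this patch; do_mmio_access() is a hypothetical helper). With wakerefs now pointer-valued, the loop condition tests the wakeref against NULL rather than a non-zero integer:

	intel_wakeref_t wf;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wf)
		do_mmio_access(i915);	/* body runs only while the wakeref is held */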
mutex_unlock(&display->pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
- return 0;
+ return NULL;
}
static void
},
{ NULL, 0 },
}, *phase;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
unsigned long count = 0;
unsigned long scanned = 0;
int err = 0, i = 0;
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
assert_object_held_shared(obj);
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, 0);
+ WRITE_ONCE(b->irq_armed, NULL);
intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
if (!intel_wakeref_get_if_active(&gt->wakeref))
- return 0;
+ return NULL;
return intel_wakeref_track(&gt->wakeref);
}
}
#define with_intel_gt_pm(gt, wf) \
- for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
+ for ((wf) = intel_gt_pm_get(gt); (wf); intel_gt_pm_put((gt), (wf)), (wf) = NULL)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
*	it from going to sleep, run some code and then put the reference away.
*
* @gt: pointer to the gt
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
+ for ((wf) = intel_gt_pm_get_if_awake(gt); (wf); intel_gt_pm_put_async((gt), (wf)), (wf) = NULL)
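An illustrative sketch of the conditional variant (not part of the patch; sample_engine_busyness() is a hypothetical helper). intel_gt_pm_get_if_awake() now returns NULL when the GT is already asleep, so the loop body is simply skipped in that case:

	intel_wakeref_t wf;

	with_intel_gt_pm_if_awake(gt, wf)
		sample_engine_busyness(gt);	/* skipped if the GT was not awake */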
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt);
if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
int err;
assert_object_held_shared(vma->obj);
{
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
struct dma_fence *fence;
int err;
pm_runtime_get_if_active(rpm->kdev) <= 0) ||
(!ignore_usecount &&
pm_runtime_get_if_in_use(rpm->kdev) <= 0))
- return 0;
+ return NULL;
}
intel_runtime_pm_acquire(rpm, true);
#define with_intel_runtime_pm(rpm, wf) \
for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#define with_intel_runtime_pm_if_in_use(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#define with_intel_runtime_pm_if_active(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
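For illustration (a hypothetical caller, not part of this patch; touch_hardware() is a placeholder helper): the body runs once with the device held awake, after which the macro puts the reference and resets wf to NULL, ending the loop:

	intel_wakeref_t wf;

	with_intel_runtime_pm(&i915->runtime_pm, wf)
		touch_hardware(i915);	/* device guaranteed awake inside the loop body */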
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
if (!atomic_read(&wf->count)) {
INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = wakeref;
- wakeref = 0;
+ wakeref = NULL;
ret = wf->ops->get(wf);
if (ret) {
- wakeref = xchg(&wf->wakeref, 0);
+ wakeref = xchg(&wf->wakeref, NULL);
wake_up_var(&wf->wakeref);
goto unlock;
}
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_dec_and_test(&wf->count)))
/* ops->put() must reschedule its own release on error/deferral */
if (likely(!wf->ops->put(wf))) {
INTEL_WAKEREF_BUG_ON(!wf->wakeref);
- wakeref = xchg(&wf->wakeref, 0);
+ wakeref = xchg(&wf->wakeref, NULL);
wake_up_var(&wf->wakeref);
}
__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
atomic_set(&wf->count, 0);
- wf->wakeref = 0;
+ wf->wakeref = NULL;
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
return;
- wakeref = fetch_and_zero(&wf->wakeref);
+ wakeref = xchg(&wf->wakeref, NULL);
spin_unlock_irqrestore(&wf->lock, flags);
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
spin_lock_init(&wf->lock);
timer_setup(&wf->timer, wakeref_auto_timeout, 0);
refcount_set(&wf->count, 0);
- wf->wakeref = 0;
+ wf->wakeref = NULL;
wf->i915 = i915;
}
if (ctx->pxp_wakeref) {
intel_runtime_pm_put(&i915->runtime_pm,
ctx->pxp_wakeref);
- ctx->pxp_wakeref = 0;
+ ctx->pxp_wakeref = NULL;
}
spin_lock_irq(&i915->gem.contexts.lock);
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : 0;
+ return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
}
static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : 0;
+ return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
}
static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
#define with_intel_runtime_pm(rpm, wf) \
for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#endif