drm_atomic_private_obj_init(adev_to_drm(adev),
&adev->dm.atomic_obj,
- NULL,
&dm_atomic_state_funcs);
r = amdgpu_display_modeset_create_props(adev);
static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
				struct komeda_layer *layer)
{
	/*
	 * drm_atomic_private_obj_init() now allocates the initial state
	 * itself and can fail (-ENOMEM); forward its result instead of
	 * unconditionally returning 0.
	 */
	return drm_atomic_private_obj_init(&kms->base, &layer->base.obj,
					   &komeda_layer_obj_funcs);
}
struct komeda_scaler *scaler)
{
drm_atomic_private_obj_init(&kms->base,
- &scaler->base.obj, NULL,
+ &scaler->base.obj,
&komeda_scaler_obj_funcs);
return 0;
}
static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
				 struct komeda_compiz *compiz)
{
	/* Propagate the init result: it can now fail with -ENOMEM. */
	return drm_atomic_private_obj_init(&kms->base, &compiz->base.obj,
					   &komeda_compiz_obj_funcs);
struct komeda_splitter *splitter)
{
drm_atomic_private_obj_init(&kms->base,
- &splitter->base.obj, NULL,
+ &splitter->base.obj,
&komeda_splitter_obj_funcs);
return 0;
struct komeda_merger *merger)
{
drm_atomic_private_obj_init(&kms->base,
- &merger->base.obj, NULL,
+ &merger->base.obj,
&komeda_merger_obj_funcs);
return 0;
static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
				 struct komeda_improc *improc)
{
	/* Propagate the init result: it can now fail with -ENOMEM. */
	return drm_atomic_private_obj_init(&kms->base, &improc->base.obj,
					   &komeda_improc_obj_funcs);
static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
				       struct komeda_timing_ctrlr *ctrlr)
{
	/* Propagate the init result: it can now fail with -ENOMEM. */
	return drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj,
					   &komeda_timing_ctrlr_obj_funcs);
static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms,
				   struct komeda_pipeline *pipe)
{
	/* Propagate the init result: it can now fail with -ENOMEM. */
	return drm_atomic_private_obj_init(&kms->base, &pipe->obj,
					   &komeda_pipeline_obj_funcs);
mgr->conn_base_id = conn_base_id;
drm_atomic_private_obj_init(dev, &mgr->base,
- NULL,
&drm_dp_mst_topology_state_funcs);
return 0;
group->available_bw = -1;
INIT_LIST_HEAD(&group->tunnels);
- drm_atomic_private_obj_init(mgr->dev, &group->base, NULL,
+ drm_atomic_private_obj_init(mgr->dev, &group->base,
&tunnel_group_funcs);
return true;
* drm_atomic_private_obj_init - initialize private object
* @dev: DRM device this object will be attached to
* @obj: private object
- * @state: initial private object state
* @funcs: pointer to the struct of function pointers that identify the object
* type
*
*/
int drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
- struct drm_private_state *state,
const struct drm_private_state_funcs *funcs)
{
+ struct drm_private_state *state;
memset(obj, 0, sizeof(*obj));
drm_modeset_lock_init(&obj->lock);
obj->funcs = funcs;
list_add_tail(&obj->head, &dev->mode_config.privobj_list);
- /*
- * Not all users of drm_atomic_private_obj_init have been
- * converted to using &drm_private_obj_funcs.atomic_create_state yet.
- * For the time being, let's only call reset if the passed state is
- * NULL. Otherwise, we will fallback to the previous behaviour.
- */
- if (!state) {
- state = obj->funcs->atomic_create_state(obj);
- if (IS_ERR(state))
- return PTR_ERR(state);
+ state = obj->funcs->atomic_create_state(obj);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
- obj->state = state;
- } else {
- obj->state = state;
- state->obj = obj;
- }
+ obj->state = state;
return 0;
}
if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_init(bridge->dev, &bridge->base,
- NULL,
&drm_bridge_priv_state_funcs);
return 0;
goto err_devclk_disable;
}
- drm_atomic_private_obj_init(drm, &priv->private_obj, NULL,
+ drm_atomic_private_obj_init(drm, &priv->private_obj,
&ingenic_drm_private_state_funcs);
ret = drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini,
return err;
}
- drm_atomic_private_obj_init(drm, &ipu->private_obj, NULL,
+ drm_atomic_private_obj_init(drm, &ipu->private_obj,
&ingenic_ipu_private_state_funcs);
return 0;
dev->mode_config.cursor_height = 512;
drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
- NULL,
&dpu_kms_global_state_funcs);
atomic_set(&dpu_kms->bandwidth_ref, 0);
mdp5_kms->dev = dev;
drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
- NULL,
&mdp5_global_state_funcs);
/* we need to set a default rate before enabling. Set a safe
{
struct omap_drm_private *priv = dev->dev_private;
- drm_atomic_private_obj_init(dev, &priv->glob_obj, NULL,
+ drm_atomic_private_obj_init(dev, &priv->glob_obj,
&omap_global_state_funcs);
return 0;
}
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
- drm_atomic_private_obj_init(drm, &hub->base, NULL,
+ drm_atomic_private_obj_init(drm, &hub->base,
&tegra_display_hub_state_funcs);
tegra->hub = hub;
{
drm_modeset_lock_init(&vc4->ctm_state_lock);
- drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, NULL,
+ drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager,
&vc4_ctm_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
- NULL,
&vc4_load_tracker_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
- NULL,
&vc4_hvs_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
int drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
- struct drm_private_state *state,
const struct drm_private_state_funcs *funcs);
void drm_atomic_private_obj_fini(struct drm_private_obj *obj);