ports:
properties:
port@2: false
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx6sx-ldb
+ then:
+ required:
+ - reg-names
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx6sx-ldb
+ then:
+ properties:
+ nxp,enable-termination-resistor: false
+
additionalProperties: false
examples:
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
- if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ if (WARN_ON(importer_ops && !importer_ops->invalidate_mappings))
return ERR_PTR(-EINVAL);
- attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ attach = kzalloc_obj(*attach);
if (!attach)
return ERR_PTR(-ENOMEM);
kfree(st);
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed layer state and reset it. */
+static struct drm_private_state *
+komeda_layer_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_layer_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
+	.atomic_create_state = komeda_layer_atomic_create_state,
 	.atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_layer_atomic_destroy_state,
 };
kfree(to_scaler_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed scaler state and reset it. */
+static struct drm_private_state *
+komeda_scaler_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_scaler_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_scaler_obj_funcs = {
+	.atomic_create_state = komeda_scaler_atomic_create_state,
 	.atomic_duplicate_state = komeda_scaler_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_scaler_atomic_destroy_state,
 };
kfree(to_compiz_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed compiz state and reset it. */
+static struct drm_private_state *
+komeda_compiz_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_compiz_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
+	.atomic_create_state = komeda_compiz_atomic_create_state,
 	.atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_compiz_atomic_destroy_state,
 };
kfree(to_splitter_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed splitter state and reset it. */
+static struct drm_private_state *
+komeda_splitter_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_splitter_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_splitter_obj_funcs = {
+	.atomic_create_state = komeda_splitter_atomic_create_state,
 	.atomic_duplicate_state = komeda_splitter_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_splitter_atomic_destroy_state,
 };
kfree(to_merger_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed merger state and reset it. */
+static struct drm_private_state *
+komeda_merger_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_merger_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_merger_obj_funcs = {
+	.atomic_create_state = komeda_merger_atomic_create_state,
 	.atomic_duplicate_state = komeda_merger_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_merger_atomic_destroy_state,
 };
kfree(to_improc_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed improc state and reset it. */
+static struct drm_private_state *
+komeda_improc_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_improc_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
+	.atomic_create_state = komeda_improc_atomic_create_state,
 	.atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_improc_atomic_destroy_state,
 };
kfree(to_ctrlr_st(priv_to_comp_st(state)));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a zeroed timing-ctrlr state and reset it. */
+static struct drm_private_state *
+komeda_timing_ctrlr_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_timing_ctrlr_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj);
+	komeda_component_state_reset(&st->base);
+	st->base.component = to_component(obj);
+
+	return &st->base.obj;
+}
+
 static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
+	.atomic_create_state = komeda_timing_ctrlr_atomic_create_state,
 	.atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
 };
kfree(priv_to_pipe_st(state));
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank pipeline state bound to its pipe. */
+static struct drm_private_state *
+komeda_pipeline_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct komeda_pipeline_state *st;
+
+	st = kzalloc_obj(*st);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &st->obj);
+	st->active_comps = 0;
+	st->pipe = container_of(obj, struct komeda_pipeline, obj);
+
+	return &st->obj;
+}
+
 static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = {
+	.atomic_create_state = komeda_pipeline_atomic_create_state,
 	.atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state,
 	.atomic_destroy_state = komeda_pipeline_atomic_destroy_state,
 };
kfree(mst_state);
}
- mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+/* .atomic_create_state hook: build a fresh, empty MST topology state. */
+static struct drm_private_state *
+drm_dp_mst_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct drm_dp_mst_topology_mgr *mgr =
+		to_dp_mst_topology_mgr(obj);
+	struct drm_dp_mst_topology_state *mst_state;
+
+	mst_state = kzalloc_obj(*mst_state);
+	if (!mst_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &mst_state->base);
+
+	/* Default slot accounting: 63 usable slots, payloads start at slot 1. */
+	mst_state->total_avail_slots = 63;
+	mst_state->start_slot = 1;
+
+	mst_state->mgr = mgr;
+	INIT_LIST_HEAD(&mst_state->payloads);
+
+	return &mst_state->base;
+}
+
static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *branch)
{
free_group_state(to_group_state(state));
}
- group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate an empty DP tunnel group state. */
+static struct drm_private_state *tunnel_group_atomic_create_state(struct drm_private_obj *obj)
+{
+	struct drm_dp_tunnel_group_state *group_state;
+
+	group_state = kzalloc_obj(*group_state);
+	if (!group_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &group_state->base);
+	INIT_LIST_HEAD(&group_state->tunnel_states);
+
+	return &group_state->base;
+}
+
 static const struct drm_private_state_funcs tunnel_group_funcs = {
+	.atomic_create_state = tunnel_group_atomic_create_state,
 	.atomic_duplicate_state = tunnel_group_duplicate_state,
 	.atomic_destroy_state = tunnel_group_destroy_state,
 };
if (!count)
return 0;
- objs = kvmalloc_array(count, sizeof(*objs), GFP_KERNEL);
- objs = kvmalloc_objs(struct drm_gem_object *, count,
- GFP_KERNEL | __GFP_ZERO);
+	objs = kvmalloc_objs(*objs, count);
if (!objs)
return -ENOMEM;
if (domain_count <= 1)
return 0;
- link_count = domain_count + (domain_count - 1);
+ if (domain_count > ARRAY_SIZE(ROGUE_PD_NAMES)) {
+ drm_err(drm_dev, "%s() only supports %zu domains on Rogue",
+ __func__, ARRAY_SIZE(ROGUE_PD_NAMES));
+ return -EOPNOTSUPP;
+ }
- domain_devs = kzalloc_objs(*domain_devs, domain_count);
- if (!domain_devs)
- return -ENOMEM;
+ link_count = domain_count - 1;
- domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+ domain_links = kzalloc_objs(*domain_links, link_count);
if (!domain_links)
return -ENOMEM;
kfree(priv_state);
}
- priv_state = kzalloc(sizeof(*priv_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank private state. */
+static struct drm_private_state *
+ingenic_drm_create_state(struct drm_private_obj *obj)
+{
+	struct ingenic_drm_private_state *priv_state;
+
+	priv_state = kzalloc_obj(*priv_state);
+	if (!priv_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &priv_state->base);
+
+	return &priv_state->base;
+}
+
DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
kfree(priv_state);
}
- priv_state = kzalloc(sizeof(*priv_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank IPU private state. */
+static struct drm_private_state *
+ingenic_ipu_create_state(struct drm_private_obj *obj)
+{
+	struct ingenic_ipu_private_state *priv_state;
+
+	priv_state = kzalloc_obj(*priv_state);
+	if (!priv_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &priv_state->base);
+
+	return &priv_state->base;
+}
+
 static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = {
+	.atomic_create_state = ingenic_ipu_create_state,
 	.atomic_duplicate_state = ingenic_ipu_duplicate_state,
 	.atomic_destroy_state = ingenic_ipu_destroy_state,
 };
kfree(dpu_state);
}
- dpu_state = kzalloc(sizeof(*dpu_state), GFP_KERNEL);
+/* .atomic_create_state hook: blank global state bound to the resource manager. */
+static struct drm_private_state *
+dpu_kms_global_create_state(struct drm_private_obj *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+	struct dpu_global_state *dpu_state;
+
+	dpu_state = kzalloc_obj(*dpu_state);
+	if (!dpu_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &dpu_state->base);
+	dpu_state->rm = &dpu_kms->rm;
+
+	return &dpu_state->base;
+}
+
static void dpu_kms_global_print_state(struct drm_printer *p,
const struct drm_private_state *state)
{
kfree(mdp5_state);
}
- mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+/* .atomic_create_state hook: blank global state bound to the mdp5 kms. */
+static struct drm_private_state *
+mdp5_global_create_state(struct drm_private_obj *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+	struct mdp5_global_state *mdp5_state;
+
+	mdp5_state = kzalloc_obj(*mdp5_state);
+	if (!mdp5_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &mdp5_state->base);
+	mdp5_state->mdp5_kms = mdp5_kms;
+
+	return &mdp5_state->base;
+}
+
static void mdp5_global_print_state(struct drm_printer *p,
const struct drm_private_state *state)
{
kfree(ctm_state);
}
- ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank CTM state. */
+static struct drm_private_state *
+vc4_ctm_create_state(struct drm_private_obj *obj)
+{
+	struct vc4_ctm_state *ctm_state;
+
+	ctm_state = kzalloc_obj(*ctm_state);
+	if (!ctm_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &ctm_state->base);
+
+	return &ctm_state->base;
+}
+
 static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
+	.atomic_create_state = vc4_ctm_create_state,
 	.atomic_duplicate_state = vc4_ctm_duplicate_state,
 	.atomic_destroy_state = vc4_ctm_destroy_state,
 };
kfree(load_state);
}
- load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank load-tracker state. */
+static struct drm_private_state *
+vc4_load_tracker_create_state(struct drm_private_obj *obj)
+{
+	struct vc4_load_tracker_state *load_state;
+
+	load_state = kzalloc_obj(*load_state);
+	if (!load_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &load_state->base);
+
+	return &load_state->base;
+}
+
 static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
+	.atomic_create_state = vc4_load_tracker_create_state,
 	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
 	.atomic_destroy_state = vc4_load_tracker_destroy_state,
 };
}
}
- hvs_state = kzalloc(sizeof(*hvs_state), GFP_KERNEL);
+/* .atomic_create_state hook: allocate a blank HVS channels state. */
+static struct drm_private_state *
+vc4_hvs_channels_create_state(struct drm_private_obj *obj)
+{
+	struct vc4_hvs_state *hvs_state;
+
+	hvs_state = kzalloc_obj(*hvs_state);
+	if (!hvs_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_private_obj_create_state(obj, &hvs_state->base);
+
+	return &hvs_state->base;
+}
+
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+ .atomic_create_state = vc4_hvs_channels_create_state,
.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
.atomic_destroy_state = vc4_hvs_channels_destroy_state,
.atomic_print_state = vc4_hvs_channels_print_state,
}
}
- if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ if (!vgdev->has_resource_blob)
return drm_gem_prime_import(dev, buf);
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ bo = kzalloc_obj(*bo);
if (!bo)
return ERR_PTR(-ENOMEM);