WARN_ON(tag->hdr.tag != ATAG_NONE);
- b = kmalloc_flex(*b, data, size, GFP_KERNEL);
+ b = kmalloc_flex(*b, data, size);
if (!b)
goto nomem;
for_each_mem_range(i, &start, &end)
nr_ranges++;
- cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
+ cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;
for_each_mem_range(i, &start, &end)
nr_ranges++;
- cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
+ cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;
repo->dev_index, repo->dev_type, port, blk_size, num_blocks,
num_regions);
- p = kzalloc_flex(*p, regions, num_regions, GFP_KERNEL);
+ p = kzalloc_flex(*p, regions, num_regions);
if (!p) {
result = -ENOMEM;
goto fail_malloc;
nr_ranges = 1; /* For exclusion of crashkernel region */
walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
- cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
+ cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;
} *attr_group;
for (i = 0; type->event_descs[i].attr.attr.name; i++);
- attr_group = kzalloc_flex(*attr_group, attrs, i + 1, GFP_KERNEL);
+ attr_group = kzalloc_flex(*attr_group, attrs, i + 1);
if (!attr_group)
goto err;
else if (rapl_pmu_scope != PERF_PMU_SCOPE_PKG)
return -EINVAL;
- rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu, GFP_KERNEL);
+ rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu);
if (!rapl_pmus)
return -ENOMEM;
int err;
mce_log_len = max(MCE_LOG_MIN_LEN, num_online_cpus());
- mcelog = kzalloc_flex(*mcelog, entry, mce_log_len, GFP_KERNEL);
+ mcelog = kzalloc_flex(*mcelog, entry, mce_log_len);
if (!mcelog)
return -ENOMEM;
DEFLATE_DEF_MEMLEVEL));
struct deflate_stream *ctx;
- ctx = kvmalloc_flex(*ctx, workspace, size, GFP_KERNEL);
+ ctx = kvmalloc_flex(*ctx, workspace, size);
if (!ctx)
return ERR_PTR(-ENOMEM);
if (!wksp_size)
return ERR_PTR(-EINVAL);
- ctx = kvmalloc_flex(*ctx, wksp, wksp_size, GFP_KERNEL);
+ ctx = kvmalloc_flex(*ctx, wksp, wksp_size);
if (!ctx)
return ERR_PTR(-ENOMEM);
struct async_events *events;
int i, ret;
- events = kzalloc_flex(*events, event, total_col, GFP_KERNEL);
+ events = kzalloc_flex(*events, event, total_col);
if (!events)
return -ENOMEM;
struct solver_node *node;
int ret;
- node = kzalloc_flex(*node, start_cols, cdop->cols_len, GFP_KERNEL);
+ node = kzalloc_flex(*node, start_cols, cdop->cols_len);
if (!node)
return ERR_PTR(-ENOMEM);
int ret, idx;
XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
- job = kzalloc_flex(*job, bos, arg_bo_cnt, GFP_KERNEL);
+ job = kzalloc_flex(*job, bos, arg_bo_cnt);
if (!job)
return -ENOMEM;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
- job = kzalloc_flex(*job, bos, bo_count, GFP_KERNEL);
+ job = kzalloc_flex(*job, bos, bo_count);
if (!job)
return NULL;
if (!nr_cpr)
goto out;
- cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr, GFP_KERNEL);
+ cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr);
if (!cpr_log)
goto out;
goto out_unlock;
ret = -ENOMEM;
- ub = kzalloc_flex(*ub, queues, info.nr_hw_queues, GFP_KERNEL);
+ ub = kzalloc_flex(*ub, queues, info.nr_hw_queues);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);
goto out;
}
- zlo = kvzalloc_flex(*zlo, zones, nr_zones, GFP_KERNEL);
+ zlo = kvzalloc_flex(*zlo, zones, nr_zones);
if (!zlo) {
ret = -ENOMEM;
goto out;
val = energy_quirk;
}
- func = kzalloc_flex(*func, template, num, GFP_KERNEL);
+ func = kzalloc_flex(*func, template, num);
if (!func)
return ERR_PTR(-ENOMEM);
return 0;
}
- hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs, GFP_KERNEL);
+ hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs);
if (!hpetp)
return -ENOMEM;
* Allocate buffer and the sg list. The sg list array is allocated
* directly after the port_buffer struct.
*/
- buf = kmalloc_flex(*buf, sg, pages, GFP_KERNEL);
+ buf = kmalloc_flex(*buf, sg, pages);
if (!buf)
goto fail;
unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck;
struct pmc_data *pmc_data;
- pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks, GFP_KERNEL);
+ pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks);
if (!pmc_data)
return NULL;
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
- clk_data = kzalloc_flex(*clk_data, hws, 2, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, 2);
if (!clk_data)
goto unregister_slow_osc;
if (WARN_ON(!pll))
return;
- clk_data = kzalloc_flex(*clk_data, hws, num_clks, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, num_clks);
if (WARN_ON(!clk_data))
goto err_clk_data;
clk_data->num = num_clks;
u8 avpll_flags = 0;
int n, ret;
- clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data) {
of_node_put(parent_np);
return;
struct clk_hw **hws;
int n, ret;
- clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data) {
of_node_put(parent_np);
return;
u32 rate;
int n;
- clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data)
return;
clk_data->num = MAX_CLKS;
clk_count = data->pll_count + data->div_count +
data->fixed_factor_count + data->early_clk_count;
- cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL);
+ cells = kzalloc_flex(*cells, hws, clk_count);
if (!cells)
return -ENOMEM;
clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
early_data->late_clk_count;
- cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL);
+ cells = kzalloc_flex(*cells, hws, clk_count);
if (!cells) {
ret = -ENOMEM;
goto err;
const char *hse_clk, *lse_clk, *i2s_clk;
struct regmap *pdrm;
- clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS);
if (!clk_data)
return;
cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV);
cpu_freq = mult_frac(in_freq, mul, cpu_div);
- onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT, GFP_KERNEL);
+ onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT);
if (!onecell)
return;
struct device_node *np;
void __iomem *base;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END, GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
}
}
- tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT, GFP_KERNEL);
+ tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT);
if (!tcu->clocks) {
ret = -ENOMEM;
goto err_clk_disable;
{
struct clk_hw_onecell_data *clk_data;
- clk_data = kzalloc_flex(*clk_data, hws, clk_num, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, clk_num);
if (!clk_data)
return NULL;
count = ARRAY_SIZE(mt7621_clks_base) +
ARRAY_SIZE(mt7621_fixed_clks) + ARRAY_SIZE(mt7621_gates);
- clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, count);
if (!clk_data)
goto free_clk_priv;
priv->data = data;
count = priv->data->num_clk_base + priv->data->num_clk_fixed +
priv->data->num_clk_factor + priv->data->num_clk_periph;
- clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, count);
if (!clk_data)
goto free_clk_priv;
struct clk *clk;
unsigned int i;
- clock = kzalloc_flex(*clock, parents, num_parents, GFP_KERNEL);
+ clock = kzalloc_flex(*clock, parents, num_parents);
if (!clock)
return ERR_PTR(-ENOMEM);
struct clk **clks;
unsigned int i;
- group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS, GFP_KERNEL);
+ group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS);
if (!group)
return;
}
nclks = info->num_total_core_clks + info->num_hw_mod_clks;
- priv = kzalloc_flex(*priv, clks, nclks, GFP_KERNEL);
+ priv = kzalloc_flex(*priv, clks, nclks);
if (!priv)
return -ENOMEM;
struct samsung_clk_provider *ctx;
int i;
- ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks, GFP_KERNEL);
+ ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks);
if (!ctx)
panic("could not allocate clock provider context.\n");
struct visconti_pll_provider *ctx;
int i;
- ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls, GFP_KERNEL);
+ ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls);
if (!ctx)
return ERR_PTR(-ENOMEM);
if (ret)
return ret;
- zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx, GFP_KERNEL);
+ zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx);
if (!zynqmp_data)
return -ENOMEM;
if (IS_ERR(map))
return PTR_ERR(map);
- tcu = kzalloc_flex(*tcu, timers, num_possible_cpus(), GFP_KERNEL);
+ tcu = kzalloc_flex(*tcu, timers, num_possible_cpus());
if (!tcu)
return -ENOMEM;
states = 2;
/* Allocate private data and frequency table for current cpu */
- centaur = kzalloc_flex(*centaur, freq_table, states + 1, GFP_KERNEL);
+ centaur = kzalloc_flex(*centaur, freq_table, states + 1);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;
return NULL;
struct cxl_feat_entries *entries __free(kvfree) =
- kvmalloc_flex(*entries, ent, count, GFP_KERNEL);
+ kvmalloc_flex(*entries, ent, count);
if (!entries)
return NULL;
struct cxl_mbox_cmd mbox_cmd;
int rc;
- transfer = kzalloc_flex(*transfer, data, 0, GFP_KERNEL);
+ transfer = kzalloc_flex(*transfer, data, 0);
if (!transfer)
return -ENOMEM;
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
- cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets, GFP_KERNEL);
+ cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets);
if (!cxlrd)
return ERR_PTR(-ENOMEM);
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- cxlsd = kzalloc_flex(*cxlsd, target, nr_targets, GFP_KERNEL);
+ cxlsd = kzalloc_flex(*cxlsd, target, nr_targets);
if (!cxlsd)
return ERR_PTR(-ENOMEM);
return -ENXIO;
struct cxl_pmem_region *cxlr_pmem __free(kfree) =
- kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets, GFP_KERNEL);
+ kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
if (!cxlr_pmem)
return -ENOMEM;
return -EINVAL;
set_lsa =
- kvzalloc_flex(*set_lsa, data, cmd->in_length, GFP_KERNEL);
+ kvzalloc_flex(*set_lsa, data, cmd->in_length);
if (!set_lsa)
return -ENOMEM;
init_node_memory_type(numa_node, mtype);
rc = -ENOMEM;
- data = kzalloc_flex(*data, res, dev_dax->nr_range, GFP_KERNEL);
+ data = kzalloc_flex(*data, res, dev_dax->nr_range);
if (!data)
goto err_dax_kmem_data;
{
struct dma_fence_array *array;
- return kzalloc_flex(*array, callbacks, num_fences, GFP_KERNEL);
+ return kzalloc_flex(*array, callbacks, num_fences);
}
EXPORT_SYMBOL(dma_fence_array_alloc);
DRIVER_NAME))
return -EBUSY;
- td = kzalloc_flex(*td, channels, pdata->nr_channels, GFP_KERNEL);
+ td = kzalloc_flex(*td, channels, pdata->nr_channels);
if (!td) {
err = -ENOMEM;
goto err_release_region;
struct skx_dev *d;
for (i = 0; i < n; i++) {
- d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL);
+ d = kzalloc_flex(*d, imc, imc_num);
if (!d)
return -ENOMEM;
if (!pdev)
break;
ndev++;
- d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL);
+ d = kzalloc_flex(*d, imc, imc_num);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
struct cros_ec_command *msg;
int ret;
- msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc_flex(*msg, data, max(outsize, insize));
if (!msg)
return -ENOMEM;
if (a->length > 256)
return -EINVAL;
- r = kmalloc_flex(*r, data, a->length, GFP_KERNEL);
+ r = kmalloc_flex(*r, data, a->length);
if (r == NULL)
return -ENOMEM;
if (gpio_aggregator_count_lines(aggr) == 0)
return -EINVAL;
- aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL);
+ aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
if (!aggr->lookups)
return -ENOMEM;
memcpy(aggr->args, buf, count + 1);
aggr->init_via_sysfs = true;
- aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL);
+ aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
if (!aggr->lookups) {
res = -ENOMEM;
goto free_ga;
lockdep_assert_held(&dev->lock);
struct gpiod_lookup_table *table __free(kfree) =
- kzalloc_flex(*table, table, num_entries + 1, GFP_KERNEL);
+ kzalloc_flex(*table, table, num_entries + 1);
if (!table)
return -ENOMEM;
if (ret)
return ret;
- lr = kvzalloc_flex(*lr, lines, ulr.num_lines, GFP_KERNEL);
+ lr = kvzalloc_flex(*lr, lines, ulr.num_lines);
if (!lr)
return -ENOMEM;
lr->num_lines = ulr.num_lines;
if (!key)
return -ENOMEM;
- lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
+ lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup)
return -ENOMEM;
struct gpio_desc_label *new = NULL, *old;
if (label) {
- new = kzalloc_flex(*new, str, strlen(label) + 1, GFP_KERNEL);
+ new = kzalloc_flex(*new, str, strlen(label) + 1);
if (!new)
return -ENOMEM;
unsigned i;
int r;
- list = kvzalloc_flex(*list, entries, num_entries, GFP_KERNEL);
+ list = kvzalloc_flex(*list, entries, num_entries);
if (!list)
return -ENOMEM;
int32_t ctx_prio;
int r;
- entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs, GFP_KERNEL);
+ entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs);
if (!entity)
return -ENOMEM;
struct ttm_range_mgr_node *node;
int r;
- node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL);
+ node = kzalloc_flex(*node, mm_nodes, 1);
if (!node)
return -ENOMEM;
if (num_ibs == 0)
return -EINVAL;
- *job = kzalloc_flex(**job, ibs, num_ibs, GFP_KERNEL);
+ *job = kzalloc_flex(**job, ibs, num_ibs);
if (!*job)
return -ENOMEM;
PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
"Invalid CAC Leakage PowerPlay Table!", return 1);
- table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL);
+ table = kzalloc_flex(*table, entries, max_levels);
if (!table)
return -ENOMEM;
int count = 8;
struct phm_clock_voltage_dependency_table *table_clk_vlt;
- table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count, GFP_KERNEL);
+ table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
uint32_t i;
struct smu10_voltage_dependency_table *ptable;
- ptable = kzalloc_flex(*ptable, entries, num_entry, GFP_KERNEL);
+ ptable = kzalloc_flex(*ptable, entries, num_entry);
if (NULL == ptable)
return -ENOMEM;
{
struct phm_clock_voltage_dependency_table *table_clk_vlt;
- table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8, GFP_KERNEL);
+ table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
/* initialize vddc_dep_on_dal_pwrl table */
- table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4, GFP_KERNEL);
+ table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
num_entries = clk_dep_table->ucNumEntries;
- clk_table = kzalloc_flex(*clk_table, entries, num_entries, GFP_KERNEL);
+ clk_table = kzalloc_flex(*clk_table, entries, num_entries);
if (!clk_table)
return -ENOMEM;
PP_ASSERT_WITH_CODE((vddc_lookup_pp_tables->ucNumEntries != 0),
"Invalid SOC_VDDD Lookup Table!", return 1);
- table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL);
+ table = kzalloc_flex(*table, entries, max_levels);
if (!table)
return -ENOMEM;
{
struct gpiod_lookup_table *lookup;
- lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
+ lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup)
return;
{
struct i915_gem_engines *e;
- e = kzalloc_flex(*e, engines, count, GFP_KERNEL);
+ e = kzalloc_flex(*e, engines, count);
if (!e)
return NULL;
struct dma_buf *dmabuf;
int i;
- mock = kmalloc_flex(*mock, pages, npages, GFP_KERNEL);
+ mock = kmalloc_flex(*mock, pages, npages);
if (!mock)
return ERR_PTR(-ENOMEM);
unsigned int n;
int err;
- ve = kzalloc_flex(*ve, siblings, count, GFP_KERNEL);
+ ve = kzalloc_flex(*ve, siblings, count);
if (!ve)
return ERR_PTR(-ENOMEM);
VFIO_REGION_INFO_FLAG_WRITE;
info->size = gvt_aperture_sz(vgpu->gvt);
- sparse = kzalloc_flex(*sparse, areas, nr_areas, GFP_KERNEL);
+ sparse = kzalloc_flex(*sparse, areas, nr_areas);
if (!sparse)
return -ENOMEM;
{
struct i915_syncmap *p;
- p = kmalloc_flex(*p, seqno, KSYNCMAP, GFP_KERNEL);
+ p = kmalloc_flex(*p, seqno, KSYNCMAP);
if (unlikely(!p))
return NULL;
unsigned int above;
/* Insert a join above the current layer */
- next = kzalloc_flex(*next, child, KSYNCMAP, GFP_KERNEL);
+ next = kzalloc_flex(*next, child, KSYNCMAP);
if (unlikely(!next))
return -ENOMEM;
if (!stats)
return -ENOMEM;
- ps = kzalloc_flex(*ps, ce, nengines, GFP_KERNEL);
+ ps = kzalloc_flex(*ps, ce, nengines);
if (!ps) {
kfree(stats);
return -ENOMEM;
{
struct nouveau_pfnmap_args *args;
- args = kzalloc_flex(*args, p.phys, npages, GFP_KERNEL);
+ args = kzalloc_flex(*args, p.phys, npages);
if (!args)
return NULL;
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
return;
- drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1, GFP_KERNEL);
+ drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1);
if (!drm->svm)
return;
struct nvkm_chid *chid;
int id;
- if (!(chid = *pchid = kzalloc_flex(*chid, used, nr, GFP_KERNEL)))
+ if (!(chid = *pchid = kzalloc_flex(*chid, used, nr)))
return -ENOMEM;
kref_init(&chid->kref);
{
struct nvkm_engine_func *func;
- func = kzalloc_flex(*func, sclass, nclass + 1, GFP_KERNEL);
+ func = kzalloc_flex(*func, sclass, nclass + 1);
if (!func)
return -ENOMEM;
struct nvkm_gr_func *func;
struct r535_gr *gr;
- func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1, GFP_KERNEL);
+ func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1);
if (!func)
return -ENOMEM;
if (!lpfn)
lpfn = man->size;
- node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL);
+ node = kzalloc_flex(*node, mm_nodes, 1);
if (!node)
return -ENOMEM;
return -EINVAL;
}
- perfmon = kzalloc_flex(*perfmon, values, req->ncounters, GFP_KERNEL);
+ perfmon = kzalloc_flex(*perfmon, values, req->ncounters);
if (!perfmon)
return -ENOMEM;
return -EINVAL;
}
- perfmon = kzalloc_flex(*perfmon, counters, req->ncounters, GFP_KERNEL);
+ perfmon = kzalloc_flex(*perfmon, counters, req->ncounters);
if (!perfmon)
return -ENOMEM;
perfmon->dev = vc4;
{
struct virtio_gpu_object_array *objs;
- objs = kmalloc_flex(*objs, objs, nents, GFP_KERNEL);
+ objs = kmalloc_flex(*objs, objs, nents);
if (!objs)
return NULL;
/* only kernel queues can be permanent */
XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
- q = kzalloc_flex(*q, lrc, width, GFP_KERNEL);
+ q = kzalloc_flex(*q, lrc, width);
if (!q)
return ERR_PTR(-ENOMEM);
if (ret < 0)
return ret;
- cbd = kzalloc_flex(*cbd, blob, ret, GFP_KERNEL);
+ cbd = kzalloc_flex(*cbd, blob, ret);
if (!cbd)
return -ENOMEM;
struct xe_bo *bo;
int err;
- pt = kzalloc_flex(*pt, entries, num_entries, GFP_KERNEL);
+ pt = kzalloc_flex(*pt, entries, num_entries);
if (!pt) {
err = -ENOMEM;
goto out;
struct xe_ttm_sys_node *node;
int r;
- node = kzalloc_flex(*node, base.mm_nodes, 1, GFP_KERNEL);
+ node = kzalloc_flex(*node, base.mm_nodes, 1);
if (!node)
return -ENOMEM;
struct gb_module *module;
int i;
- module = kzalloc_flex(*module, interfaces, num_interfaces, GFP_KERNEL);
+ module = kzalloc_flex(*module, interfaces, num_interfaces);
if (!module)
return NULL;
return -EINVAL;
}
- gdev = kzalloc_flex(*gdev, ei, chip->nlines, GFP_KERNEL);
+ gdev = kzalloc_flex(*gdev, ei, chip->nlines);
if (!gdev)
return -ENOMEM;
if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
- atr = kzalloc_flex(*atr, adapter, max_adapters, GFP_KERNEL);
+ atr = kzalloc_flex(*atr, adapter, max_adapters);
if (!atr)
return ERR_PTR(-ENOMEM);
{
struct adi_i3c_xfer *xfer;
- xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
+ xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;
{
struct dw_i3c_xfer *xfer;
- xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
+ xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;
{
struct cdns_i3c_xfer *xfer;
- xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
+ xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;
}
if (nr_rings > XFER_RINGS)
nr_rings = XFER_RINGS;
- rings = kzalloc_flex(*rings, headers, nr_rings, GFP_KERNEL);
+ rings = kzalloc_flex(*rings, headers, nr_rings);
if (!rings)
return -ENOMEM;
hci->io_data = rings;
{
struct renesas_i3c_xfer *xfer;
- xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
+ xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;
{
struct svc_i3c_xfer *xfer;
- xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
+ xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;
return buf;
}
- buf = kzalloc_flex(*buf, scan_mask, mask_longs, GFP_KERNEL);
+ buf = kzalloc_flex(*buf, scan_mask, mask_longs);
if (!buf)
return NULL;
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
- work = kmalloc_flex(*work, path, paths, GFP_KERNEL);
+ work = kmalloc_flex(*work, path, paths);
if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;
int i;
int count = 0;
- dev = kmalloc_flex(*dev, port, device->phys_port_cnt, GFP_KERNEL);
+ dev = kmalloc_flex(*dev, port, device->phys_port_cnt);
if (!dev)
return -ENOMEM;
{
struct rdma_hw_stats *stats;
- stats = kzalloc_flex(*stats, value, num_counters, GFP_KERNEL);
+ stats = kzalloc_flex(*stats, value, num_counters);
if (!stats)
return NULL;
* Allocate the node first so we can handle a potential
* failure before we've programmed anything.
*/
- node = kzalloc_flex(*node, pages, npages, GFP_KERNEL);
+ node = kzalloc_flex(*node, pages, npages);
if (!node)
return -ENOMEM;
return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
- work = kvzalloc_flex(*work, frags, num_sge, GFP_KERNEL);
+ work = kvzalloc_flex(*work, frags, num_sge);
if (!work)
return -ENOMEM;
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
- table = kmalloc_flex(*table, icm, num_icm, GFP_KERNEL);
+ table = kmalloc_flex(*table, icm, num_icm);
if (!table)
return NULL;
return NULL;
npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
- db_tab = kmalloc_flex(*db_tab, page, npages, GFP_KERNEL);
+ db_tab = kmalloc_flex(*db_tab, page, npages);
if (!db_tab)
return ERR_PTR(-ENOMEM);
/* Allocate struct plus pointers to first level page tables. */
m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
- mr = kzalloc_flex(*mr, mr.map, m, GFP_KERNEL);
+ mr = kzalloc_flex(*mr, mr.map, m);
if (!mr)
goto bail;
num_sge);
/* Asynchronous call is "best-effort" and allowed to fail */
- work = kvzalloc_flex(*work, frags, num_sge, GFP_KERNEL);
+ work = kvzalloc_flex(*work, frags, num_sge);
if (!work)
return -ENOMEM;
if (num_buf == 0)
return ERR_PTR(-EINVAL);
- pbl = kzalloc_flex(*pbl, pbe, num_buf, GFP_KERNEL);
+ pbl = kzalloc_flex(*pbl, pbe, num_buf);
if (!pbl)
return ERR_PTR(-ENOMEM);
if (pool_size <= 0)
goto err;
ret = -ENOMEM;
- pool = kzalloc_flex(*pool, desc, pool_size, GFP_KERNEL);
+ pool = kzalloc_flex(*pool, desc, pool_size);
if (!pool)
goto err;
pool->size = pool_size;
pr_debug("device = %p\n", device);
- sdev = kzalloc_flex(*sdev, port, device->phys_port_cnt, GFP_KERNEL);
+ sdev = kzalloc_flex(*sdev, port, device->phys_port_cnt);
if (!sdev)
return -ENOMEM;
struct evdev_client *client;
int error;
- client = kvzalloc_flex(*client, buffer, bufsize, GFP_KERNEL);
+ client = kvzalloc_flex(*client, buffer, bufsize);
if (!client)
return -ENOMEM;
}
struct ff_device *ff __free(kfree) =
- kzalloc_flex(*ff, effect_owners, max_effects, GFP_KERNEL);
+ kzalloc_flex(*ff, effect_owners, max_effects);
if (!ff)
return -ENOMEM;
if (!num_leds)
return -ENXIO;
- leds = kzalloc_flex(*leds, leds, num_leds, GFP_KERNEL);
+ leds = kzalloc_flex(*leds, leds, num_leds);
if (!leds)
return -ENOMEM;
return -EINVAL;
struct input_mt *mt __free(kfree) =
- kzalloc_flex(*mt, slots, num_slots, GFP_KERNEL);
+ kzalloc_flex(*mt, slots, num_slots);
if (!mt)
return -ENOMEM;
row_shift = get_count_order(pdata->cols);
keycodemax = pdata->rows << row_shift;
- omap_kp = kzalloc_flex(*omap_kp, keymap, keycodemax, GFP_KERNEL);
+ omap_kp = kzalloc_flex(*omap_kp, keymap, keycodemax);
input_dev = input_allocate_device();
if (!omap_kp || !input_dev) {
kfree(omap_kp);
struct icc_path *path;
int i;
- path = kzalloc_flex(*path, reqs, num_nodes, GFP_KERNEL);
+ path = kzalloc_flex(*path, reqs, num_nodes);
if (!path)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
/* Preallocate for the overwhelmingly common case of 1 ID */
- fwspec = kzalloc_flex(*fwspec, ids, 1, GFP_KERNEL);
+ fwspec = kzalloc_flex(*fwspec, ids, 1);
if (!fwspec)
return -ENOMEM;
struct dm_bio_prison *prison;
num_locks = dm_num_hash_locks();
- prison = kzalloc_flex(*prison, regions, num_locks, GFP_KERNEL);
+ prison = kzalloc_flex(*prison, regions, num_locks);
if (!prison)
return NULL;
prison->num_locks = num_locks;
return -EINVAL;
}
- cc = kzalloc_flex(*cc, key, key_size, GFP_KERNEL);
+ cc = kzalloc_flex(*cc, key, key_size);
if (!cc) {
ti->error = "Cannot allocate encryption context";
return -ENOMEM;
return ERR_PTR(-EINVAL);
}
- rs = kzalloc_flex(*rs, dev, raid_devs, GFP_KERNEL);
+ rs = kzalloc_flex(*rs, dev, raid_devs);
if (!rs) {
ti->error = "Cannot allocate raid context";
return ERR_PTR(-ENOMEM);
struct dm_dirty_log *dl)
{
struct mirror_set *ms =
- kzalloc_flex(*ms, mirror, nr_mirrors, GFP_KERNEL);
+ kzalloc_flex(*ms, mirror, nr_mirrors);
if (!ms) {
ti->error = "Cannot allocate mirror context";
return -EINVAL;
}
- sc = kmalloc_flex(*sc, stripe, stripes, GFP_KERNEL);
+ sc = kmalloc_flex(*sc, stripe, stripes);
if (!sc) {
ti->error = "Memory allocation for striped context failed";
return -ENOMEM;
{
struct switch_ctx *sctx;
- sctx = kzalloc_flex(*sctx, path_list, nr_paths, GFP_KERNEL);
+ sctx = kzalloc_flex(*sctx, path_list, nr_paths);
if (!sctx)
return NULL;
int cnt;
int i;
- conf = kzalloc_flex(*conf, disks, raid_disks, GFP_KERNEL);
+ conf = kzalloc_flex(*conf, disks, raid_disks);
if (!conf)
return ERR_PTR(-ENOMEM);
struct tegra_ctx *ctx;
int err;
- ctx = kzalloc_flex(*ctx, ctrls, ARRAY_SIZE(ctrl_cfgs), GFP_KERNEL);
+ ctx = kzalloc_flex(*ctx, ctrls, ARRAY_SIZE(ctrl_cfgs));
if (!ctx)
return -ENOMEM;
if (elems < 1)
elems = 1;
- sev = kvzalloc_flex(*sev, events, elems, GFP_KERNEL);
+ sev = kvzalloc_flex(*sev, events, elems);
if (!sev)
return -ENOMEM;
sev->elems = elems;
goto err_out_int;
}
- jm = kzalloc_flex(*jm, hosts, cnt, GFP_KERNEL);
+ jm = kzalloc_flex(*jm, hosts, cnt);
if (!jm) {
rc = -ENOMEM;
goto err_out_int;
return -EINVAL;
}
- entry = kzalloc_flex(*entry, to_v_msg, 1, GFP_KERNEL);
+ entry = kzalloc_flex(*entry, to_v_msg, 1);
if (!entry)
return -ENOMEM;
entry->to_v_blks = 1; /* always 1 block */
struct enclosure_component_callbacks *cb)
{
struct enclosure_device *edev =
- kzalloc_flex(*edev, component, components, GFP_KERNEL);
+ kzalloc_flex(*edev, component, components);
int err, i;
BUG_ON(!cb);
{
struct lkdtm_cb_fam *inst;
- inst = kzalloc_flex(*inst, array, element_count + 1, GFP_KERNEL);
+ inst = kzalloc_flex(*inst, array, element_count + 1);
if (!inst) {
pr_err("FAIL: could not allocate test struct!\n");
return;
if (args->alert)
fds[count] = args->alert;
- q = kmalloc_flex(*q, entries, total_count, GFP_KERNEL);
+ q = kmalloc_flex(*q, entries, total_count);
if (!q)
return -ENOMEM;
q->task = current;
{
struct tifm_adapter *fm;
- fm = kzalloc_flex(*fm, sockets, num_sockets, GFP_KERNEL);
+ fm = kzalloc_flex(*fm, sockets, num_sockets);
if (fm) {
fm->dev.class = &tifm_adapter_class;
fm->dev.parent = dev;
}
numvirtchips = cfi->numchips * numparts;
- newcfi = kmalloc_flex(*newcfi, chips, numvirtchips, GFP_KERNEL);
+ newcfi = kmalloc_flex(*newcfi, chips, numvirtchips);
if (!newcfi)
return -ENOMEM;
shared = kmalloc_objs(struct flchip_shared, cfi->numchips,
* our caller, and copy the appropriate data into them.
*/
- retcfi = kmalloc_flex(*retcfi, chips, cfi.numchips, GFP_KERNEL);
+ retcfi = kmalloc_flex(*retcfi, chips, cfi.numchips);
if (!retcfi) {
kfree(cfi.cfiq);
return -ENODEV;
}
- nvm = kzalloc_flex(*nvm, regions, nregions, GFP_KERNEL);
+ nvm = kzalloc_flex(*nvm, regions, nregions);
if (!nvm)
return -ENOMEM;
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kzalloc_flex(*retlpddr, chips, numvirtchips, GFP_KERNEL);
+ retlpddr = kzalloc_flex(*retlpddr, chips, numvirtchips);
if (!retlpddr)
return NULL;
/*
* Allocate the map_info structs in one go.
*/
- info = kzalloc_flex(*info, subdev, nr, GFP_KERNEL);
+ info = kzalloc_flex(*info, subdev, nr);
if (!info) {
ret = -ENOMEM;
goto out;
usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt);
- all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt, GFP_KERNEL);
+ all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt);
if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
goto out;
return -EINVAL;
}
- parent = kzalloc_flex(*parent, canch, icount, GFP_KERNEL);
+ parent = kzalloc_flex(*parent, canch, icount);
if (!parent)
return -ENOMEM;
if (list_empty(&gating_cfg->entries))
return false;
- dummy = kzalloc_flex(*dummy, entries, num_entries, GFP_KERNEL);
+ dummy = kzalloc_flex(*dummy, entries, num_entries);
if (!dummy) {
NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
return true;
int i, j;
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- fl = kzalloc_flex(*fl, filters, tlv->n_mac_vlan_filters, GFP_KERNEL);
+ fl = kzalloc_flex(*fl, filters, tlv->n_mac_vlan_filters);
if (!fl)
return -ENOMEM;
struct l2t_data *d;
int i;
- d = kvzalloc_flex(*d, l2tab, l2t_capacity, GFP_KERNEL);
+ d = kvzalloc_flex(*d, l2tab, l2t_capacity);
if (!d)
return NULL;
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = kvzalloc_flex(*ctbl, hash_list, clipt_size, GFP_KERNEL);
+ ctbl = kvzalloc_flex(*ctbl, hash_list, clipt_size);
if (!ctbl)
return NULL;
if (!max_tids)
return NULL;
- t = kvzalloc_flex(*t, table, max_tids, GFP_KERNEL);
+ t = kvzalloc_flex(*t, table, max_tids);
if (!t)
return NULL;
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = kvzalloc_flex(*d, l2tab, l2t_size, GFP_KERNEL);
+ d = kvzalloc_flex(*d, l2tab, l2t_size);
if (!d)
return NULL;
struct sched_table *s;
unsigned int i;
- s = kvzalloc_flex(*s, tab, sched_size, GFP_KERNEL);
+ s = kvzalloc_flex(*s, tab, sched_size);
if (!s)
return NULL;
smt_size = SMT_SIZE;
- s = kvzalloc_flex(*s, smtab, smt_size, GFP_KERNEL);
+ s = kvzalloc_flex(*s, smtab, smt_size);
if (!s)
return NULL;
s->smt_size = smt_size;
struct tc_taprio_qopt_offload *qopt;
int i;
- qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL);
+ qopt = kzalloc_flex(*qopt, entries, 255);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
struct tc_taprio_qopt_offload *qopt;
int i;
- qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL);
+ qopt = kzalloc_flex(*qopt, entries, 255);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
struct tc_taprio_qopt_offload *qopt;
int i;
- qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL);
+ qopt = kzalloc_flex(*qopt, entries, 255);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
struct enetc_bdr *bdr;
int j, err;
- v = kzalloc_flex(*v, tx_ring, v_tx_rings, GFP_KERNEL);
+ v = kzalloc_flex(*v, tx_ring, v_tx_rings);
if (!v)
return -ENOMEM;
vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
- vf_cb = kzalloc_flex(*vf_cb, ae_handle.qs, qnum_per_vf, GFP_KERNEL);
+ vf_cb = kzalloc_flex(*vf_cb, ae_handle.qs, qnum_per_vf);
if (unlikely(!vf_cb)) {
dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
ae_handle = ERR_PTR(-ENOMEM);
ring_count = txr_count + rxr_count;
/* allocate q_vector and rings */
- q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL);
+ q_vector = kzalloc_flex(*q_vector, ring, ring_count);
if (!q_vector)
return -ENOMEM;
{
struct iavf_ptp_aq_cmd *cmd;
- cmd = kzalloc_flex(*cmd, msg, msglen, GFP_KERNEL);
+ cmd = kzalloc_flex(*cmd, msg, msglen);
if (!cmd)
return NULL;
if (q_index >= num_rxq)
return -EINVAL;
- rule = kzalloc_flex(*rule, rule_info, 1, GFP_KERNEL);
+ rule = kzalloc_flex(*rule, rule_info, 1);
if (!rule)
return -ENOMEM;
if (!idpf_sideband_action_ena(vport, fsp))
return -EOPNOTSUPP;
- rule = kzalloc_flex(*rule, rule_info, 1, GFP_KERNEL);
+ rule = kzalloc_flex(*rule, rule_info, 1);
if (!rule)
return -ENOMEM;
{
struct idpf_queue_set *qp;
- qp = kzalloc_flex(*qp, qs, num, GFP_KERNEL);
+ qp = kzalloc_flex(*qp, qs, num);
if (!qp)
return NULL;
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
if (!q_vector)
- q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL);
+ q_vector = kzalloc_flex(*q_vector, ring, ring_count);
else
memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
if (!q_vector)
q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
GFP_KERNEL, node);
if (!q_vector)
- q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL);
+ q_vector = kzalloc_flex(*q_vector, ring, ring_count);
if (!q_vector)
return -ENOMEM;
char resn[32];
int i;
- d = kzalloc_flex(*d, fields, nfile, GFP_KERNEL);
+ d = kzalloc_flex(*d, fields, nfile);
if (!d)
return -ENOMEM;
{
struct mlx5_flow_handle *handle;
- handle = kzalloc_flex(*handle, rule, num_rules, GFP_KERNEL);
+ handle = kzalloc_flex(*handle, rule, num_rules);
if (!handle)
return NULL;
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- fc_bulk = kvzalloc_flex(*fc_bulk, fcs, bulk_len, GFP_KERNEL);
+ fc_bulk = kvzalloc_flex(*fc_bulk, fcs, bulk_len);
if (!fc_bulk)
return NULL;
return NULL;
pr_pool_ctx = pool_ctx;
bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
- pr_bulk = kvzalloc_flex(*pr_bulk, prs_data, bulk_len, GFP_KERNEL);
+ pr_bulk = kvzalloc_flex(*pr_bulk, prs_data, bulk_len);
if (!pr_bulk)
return NULL;
pattern = pool_ctx;
bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
- mh_bulk = kvzalloc_flex(*mh_bulk, mhs_data, bulk_len, GFP_KERNEL);
+ mh_bulk = kvzalloc_flex(*mh_bulk, mhs_data, bulk_len);
if (!mh_bulk)
return NULL;
mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
module_count;
- env = kzalloc_flex(*env, line_cards, num_of_slots + 1, GFP_KERNEL);
+ env = kzalloc_flex(*env, line_cards, num_of_slots + 1);
if (!env)
return -ENOMEM;
}
nr_entries = div_u64(resource_size, info->alloc_size);
- part = kzalloc_flex(*part, usage, BITS_TO_LONGS(nr_entries), GFP_KERNEL);
+ part = kzalloc_flex(*part, usage, BITS_TO_LONGS(nr_entries));
if (!part)
return ERR_PTR(-ENOMEM);
struct mlxsw_sp_counter_pool *pool;
int err;
- pool = kzalloc_flex(*pool, sub_pools, sub_pools_count, GFP_KERNEL);
+ pool = kzalloc_flex(*pool, sub_pools, sub_pools_count);
if (!pool)
return -ENOMEM;
mlxsw_sp->counter_pool = pool;
return -EINVAL;
}
- nhgi = kzalloc_flex(*nhgi, nexthops, nhs, GFP_KERNEL);
+ nhgi = kzalloc_flex(*nhgi, nexthops, nhs);
if (!nhgi)
return -ENOMEM;
nh_grp->nhgi = nhgi;
struct mlxsw_sp_nexthop *nh;
int err, i;
- nhgi = kzalloc_flex(*nhgi, nexthops, nhs, GFP_KERNEL);
+ nhgi = kzalloc_flex(*nhgi, nexthops, nhs);
if (!nhgi)
return -ENOMEM;
nh_grp->nhgi = nhgi;
struct mlxsw_sp_nexthop *nh;
int err, i;
- nhgi = kzalloc_flex(*nhgi, nexthops, fib6_entry->nrt6, GFP_KERNEL);
+ nhgi = kzalloc_flex(*nhgi, nexthops, fib6_entry->nrt6);
if (!nhgi)
return -ENOMEM;
nh_grp->nhgi = nhgi;
return -EIO;
entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
- span = kzalloc_flex(*span, entries, entries_count, GFP_KERNEL);
+ span = kzalloc_flex(*span, entries, entries_count);
if (!span)
return -ENOMEM;
refcount_set(&span->policer_id_base_ref_count, 0);
return -EIO;
/* Allocate NAPI vector and queue triads */
- nv = kzalloc_flex(*nv, qt, qt_count, GFP_KERNEL);
+ nv = kzalloc_flex(*nv, qt, qt_count);
if (!nv)
return -ENOMEM;
int err;
u16 i;
- dma_buf = kzalloc_flex(*dma_buf, reqs, q_depth, GFP_KERNEL);
+ dma_buf = kzalloc_flex(*dma_buf, reqs, q_depth);
if (!dma_buf)
return -ENOMEM;
gc = gd->gdma_context;
- rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size, GFP_KERNEL);
+ rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size);
if (!rxq)
return NULL;
{
struct nfp_reprs *reprs;
- reprs = kzalloc_flex(*reprs, reprs, num_reprs, GFP_KERNEL);
+ reprs = kzalloc_flex(*reprs, reprs, num_reprs);
if (!reprs)
return NULL;
reprs->num_reprs = num_reprs;
goto err;
}
- table = kzalloc_flex(*table, ports, cnt, GFP_KERNEL);
+ table = kzalloc_flex(*table, ports, cnt);
if (!table)
goto err;
if (!priv->dma_cap.frpsel)
return -EOPNOTSUPP;
- sel = kzalloc_flex(*sel, keys, nk, GFP_KERNEL);
+ sel = kzalloc_flex(*sel, keys, nk);
if (!sel)
return -ENOMEM;
cls->command = FLOW_CLS_REPLACE;
cls->cookie = dummy_cookie;
- rule = kzalloc_flex(*rule, action.entries, 1, GFP_KERNEL);
+ rule = kzalloc_flex(*rule, action.entries, 1);
if (!rule) {
ret = -ENOMEM;
goto cleanup_cls;
cls->command = FLOW_CLS_REPLACE;
cls->cookie = dummy_cookie;
- rule = kzalloc_flex(*rule, action.entries, 1, GFP_KERNEL);
+ rule = kzalloc_flex(*rule, action.entries, 1);
if (!rule) {
ret = -ENOMEM;
goto cleanup_cls;
/* note this will allocate space for the ring structure as well! */
ring_count = txr_count + rxr_count;
- q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL);
+ q_vector = kzalloc_flex(*q_vector, ring, ring_count);
if (!q_vector)
return -ENOMEM;
ports = 4;
}
- card = kzalloc_flex(*card, ports, ports, GFP_KERNEL);
+ card = kzalloc_flex(*card, ports, ports);
if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
struct ath11k_ce_ring *ce_ring;
dma_addr_t base_addr;
- ce_ring = kzalloc_flex(*ce_ring, skb, nentries, GFP_KERNEL);
+ ce_ring = kzalloc_flex(*ce_ring, skb, nentries);
if (ce_ring == NULL)
return ERR_PTR(-ENOMEM);
if (WARN_ON(!num_channels))
return -EINVAL;
- params = kzalloc_flex(*params, ch_param, num_channels, GFP_KERNEL);
+ params = kzalloc_flex(*params, ch_param, num_channels);
if (!params)
return -ENOMEM;
struct ath12k_ce_ring *ce_ring;
dma_addr_t base_addr;
- ce_ring = kzalloc_flex(*ce_ring, skb, nentries, GFP_KERNEL);
+ ce_ring = kzalloc_flex(*ce_ring, skb, nentries);
if (!ce_ring)
return ERR_PTR(-ENOMEM);
return -EINVAL;
}
- arg = kzalloc_flex(*arg, channel, num_channels, GFP_KERNEL);
+ arg = kzalloc_flex(*arg, channel, num_channels);
if (!arg)
return -ENOMEM;
{
struct brcmf_fweh_info *fweh;
- fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_BCA_E_LAST, GFP_KERNEL);
+ fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_BCA_E_LAST);
if (!fweh)
return -ENOMEM;
brcmf_dbg(TRACE, "Enter\n");
- chunk_buf = kzalloc_flex(*chunk_buf, data, MAX_CHUNK_LEN, GFP_KERNEL);
+ chunk_buf = kzalloc_flex(*chunk_buf, data, MAX_CHUNK_LEN);
if (!chunk_buf) {
err = -ENOMEM;
return -ENOMEM;
{
struct brcmf_fweh_info *fweh;
- fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_CYW_E_LAST, GFP_KERNEL);
+ fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_CYW_E_LAST);
if (!fweh)
return -ENOMEM;
return NULL;
}
- fwreq = kzalloc_flex(*fwreq, items, n_fwnames, GFP_KERNEL);
+ fwreq = kzalloc_flex(*fwreq, items, n_fwnames);
if (!fwreq)
return NULL;
{
struct brcmf_fweh_info *fweh;
- fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_WCC_E_LAST, GFP_KERNEL);
+ fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_WCC_E_LAST);
if (!fweh)
return -ENOMEM;
if (WARN_ON(!cfg || !cfg->eeprom_params))
return NULL;
- data = kzalloc_flex(*data, channels, IWL_NUM_CHANNELS, GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NUM_CHANNELS);
if (!data)
return NULL;
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc_flex(*node, tlv.data, len, GFP_KERNEL);
+ node = kzalloc_flex(*node, tlv.data, len);
if (!node)
return NULL;
num_of_ch);
/* build a regdomain rule for every valid channel */
- regd = kzalloc_flex(*regd, reg_rules, num_of_ch, GFP_KERNEL);
+ regd = kzalloc_flex(*regd, reg_rules, num_of_ch);
if (!regd)
return ERR_PTR(-ENOMEM);
if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
- nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS, GFP_KERNEL);
+ nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS);
if (!nvm) {
ret = -ENOMEM;
goto out;
for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++)
n_channels +=
hweight8(matches[i].matching_channels[k]);
- match = kzalloc_flex(*match, channels, n_channels, GFP_KERNEL);
+ match = kzalloc_flex(*match, channels, n_channels);
if (!match)
return;
return -EINVAL;
WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx]));
- *ptk_pn = kzalloc_flex(**ptk_pn, q, num_rx_queues, GFP_KERNEL);
+ *ptk_pn = kzalloc_flex(**ptk_pn, q, num_rx_queues);
if (!*ptk_pn)
return -ENOMEM;
n_matches = 0;
}
- net_detect = kzalloc_flex(*net_detect, matches, n_matches, GFP_KERNEL);
+ net_detect = kzalloc_flex(*net_detect, matches, n_matches);
if (!net_detect || !n_matches)
goto out_report_nd;
net_detect->n_matches = n_matches;
d3_data->nd_results,
i);
- match = kzalloc_flex(*match, channels, n_channels, GFP_KERNEL);
+ match = kzalloc_flex(*match, channels, n_channels);
if (!match)
goto out_report_nd;
match->n_channels = n_channels;
if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
- regd = kzalloc_flex(*regd, reg_rules, num_chan, GFP_KERNEL);
+ regd = kzalloc_flex(*regd, reg_rules, num_chan);
if (!regd)
return ERR_PTR(-ENOMEM);
mt76_rx_aggr_stop(dev, wcid, tidno);
- tid = kzalloc_flex(*tid, reorder_buf, size, GFP_KERNEL);
+ tid = kzalloc_flex(*tid, reorder_buf, size);
if (!tid)
return -ENOMEM;
goto out;
}
- data = kzalloc_flex(*data, buf, len, GFP_KERNEL);
+ data = kzalloc_flex(*data, buf, len);
if (!data)
goto out;
u16 status;
status = NVME_SC_INTERNAL;
- desc = kmalloc_flex(*desc, nsids, NVMET_MAX_NAMESPACES, GFP_KERNEL);
+ desc = kmalloc_flex(*desc, nsids, NVMET_MAX_NAMESPACES);
if (!desc)
goto out;
if (qid > NVMET_NR_QUEUES)
return NULL;
- queue = kzalloc_flex(*queue, fod, sqsize, GFP_KERNEL);
+ queue = kzalloc_flex(*queue, fod, sqsize);
if (!queue)
return NULL;
int led_idx = 0;
int ret;
- npem = kzalloc_flex(*npem, leds, supported_cnt, GFP_KERNEL);
+ npem = kzalloc_flex(*npem, leds, supported_cnt);
if (!npem)
return -ENOMEM;
{
struct ec_event_queue *q;
- q = kzalloc_flex(*q, entries, capacity, GFP_KERNEL);
+ q = kzalloc_flex(*q, entries, capacity);
if (!q)
return NULL;
u32 fsize;
fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
- stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize, GFP_KERNEL);
+ stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize);
if (!stb_data_arr)
return -ENOMEM;
}
fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
- stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize, GFP_KERNEL);
+ stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize);
if (!stb_data_arr)
return -ENOMEM;
int ret;
struct gpiod_lookup_table *lookup __free(kfree) =
- kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
+ kzalloc_flex(*lookup, table, 2);
if (!lookup)
return ERR_PTR(-ENOMEM);
struct gpiod_lookup_table *lookup;
struct gpio_desc *gpiod;
- lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
+ lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup)
return -ENOMEM;
struct cros_ec_command *msg;
int ret;
- msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc_flex(*msg, data, max(outsize, insize));
if (!msg)
return -ENOMEM;
struct cros_ec_command *msg;
int ret;
- msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc_flex(*msg, data, max(outsize, insize));
if (!msg)
return -ENOMEM;
if (!chip->operational)
return -ENXIO;
- cdata = kzalloc_flex(*cdata, pwm, chip->npwm, GFP_KERNEL);
+ cdata = kzalloc_flex(*cdata, pwm, chip->npwm);
if (!cdata)
return -ENOMEM;
if (num < 0)
return optional ? NULL : ERR_PTR(num);
- resets = kzalloc_flex(*resets, rstc, num, GFP_KERNEL);
+ resets = kzalloc_flex(*resets, rstc, num);
if (!resets)
return ERR_PTR(-ENOMEM);
resets->num_rstcs = num;
return;
rtc->data = data;
- clk_data = kzalloc_flex(*clk_data, hws, 3, GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, 3);
if (!clk_data) {
kfree(rtc);
return;
if (num_devices < 1)
return -EINVAL;
- gdev = kzalloc_flex(*gdev, cdev, num_devices, GFP_KERNEL);
+ gdev = kzalloc_flex(*gdev, cdev, num_devices);
if (!gdev)
return -ENOMEM;
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
- p = kzalloc_flex(*p, instructions, p_dry.length + 1, GFP_KERNEL);
+ p = kzalloc_flex(*p, instructions, p_dry.length + 1);
if (!p)
return -ENOMEM;
* More on the problem that it addresses:
* https://www.spinics.net/lists/linux-gpio/msg36218.html
*/
- lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
+ lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup) {
ret = -ENOMEM;
goto err_cleanup;
goto exit;
}
- raw_data = kmalloc_flex(*raw_data, data, len, GFP_KERNEL);
+ raw_data = kmalloc_flex(*raw_data, data, len);
if (!raw_data) {
retval = -ENOMEM;
goto exit;
{
struct rtw_cbuf *cbuf;
- cbuf = kzalloc_flex(*cbuf, bufs, size, GFP_KERNEL);
+ cbuf = kzalloc_flex(*cbuf, bufs, size);
cbuf->size = size;
return cbuf;
ssize_t len = 0;
int ret = 0, i;
- aio_cmd = kmalloc_flex(*aio_cmd, bvecs, sgl_nents, GFP_KERNEL);
+ aio_cmd = kmalloc_flex(*aio_cmd, bvecs, sgl_nents);
if (!aio_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!thermal_class)
return ERR_PTR(-ENODEV);
- tz = kzalloc_flex(*tz, trips, num_trips, GFP_KERNEL);
+ tz = kzalloc_flex(*tz, trips, num_trips);
if (!tz)
return ERR_PTR(-ENOMEM);
struct tz_episode *tze;
int i;
- tze = kzalloc_flex(*tze, trip_stats, tz->num_trips, GFP_KERNEL);
+ tze = kzalloc_flex(*tze, trip_stats, tz->num_trips);
if (!tze)
return NULL;
return ERR_PTR(err);
}
- hp = kzalloc_flex(*hp, outbuf, outbuf_size, GFP_KERNEL);
+ hp = kzalloc_flex(*hp, outbuf, outbuf_size);
if (!hp)
return ERR_PTR(-ENOMEM);
unsigned int i;
for (i = 0; i < count; i++) {
- buf = kmalloc_flex(*buf, buf, maxframe, GFP_KERNEL);
+ buf = kmalloc_flex(*buf, buf, maxframe);
if (!buf) {
pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
__func__, name, i);
nr_ports = rc;
}
- priv = kzalloc_flex(*priv, line, nr_ports, GFP_KERNEL);
+ priv = kzalloc_flex(*priv, line, nr_ports);
if (!priv) {
priv = ERR_PTR(-ENOMEM);
goto err_deinit;
nalts[i] = j = USB_MAXALTSETTING;
}
- intfc = kzalloc_flex(*intfc, altsetting, j, GFP_KERNEL);
+ intfc = kzalloc_flex(*intfc, altsetting, j);
config->intf_cache[i] = intfc;
if (!intfc)
return -ENOMEM;
data_len, ret);
data_len -= ret;
- buf = kmalloc_flex(*buf, storage, data_len, GFP_KERNEL);
+ buf = kmalloc_flex(*buf, storage, data_len);
if (!buf)
return -ENOMEM;
buf->length = data_len;
}
/* allocate and initialize one new instance */
- midi = kzalloc_flex(*midi, in_ports_array, opts->in_ports, GFP_KERNEL);
+ midi = kzalloc_flex(*midi, in_ports_array, opts->in_ports);
if (!midi) {
status = -ENOMEM;
goto setup_fail;
struct f_phonet *fp;
struct f_phonet_opts *opts;
- fp = kzalloc_flex(*fp, out_reqv, phonet_rxq_size, GFP_KERNEL);
+ fp = kzalloc_flex(*fp, out_reqv, phonet_rxq_size);
if (!fp)
return ERR_PTR(-ENOMEM);
else
len = 1;
- sch_ep = kzalloc_flex(*sch_ep, bw_budget_table, len, GFP_KERNEL);
+ sch_ep = kzalloc_flex(*sch_ep, bw_budget_table, len);
if (!sch_ep)
return ERR_PTR(-ENOMEM);
return -EOPNOTSUPP;
if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kvzalloc_flex(*newmem, regions, mem.nregions, GFP_KERNEL);
+ newmem = kvzalloc_flex(*newmem, regions, mem.nregions);
if (!newmem)
return -ENOMEM;
struct mmp_path *path = NULL;
struct mmp_panel *panel;
- path = kzalloc_flex(*path, overlays, info->overlay_num, GFP_KERNEL);
+ path = kzalloc_flex(*path, overlays, info->overlay_num);
if (!path)
return NULL;
const struct bin_attribute **attrs __free(kfree) =
kzalloc(sizeof(*attrs) * (tm->nr_mrs + 1) + nlen, GFP_KERNEL);
struct tm_context *ctx __free(kfree) =
- kzalloc_flex(*ctx, mrs, tm->nr_mrs, GFP_KERNEL);
+ kzalloc_flex(*ctx, mrs, tm->nr_mrs);
char *name, *end;
if (!ctx || !attrs)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma_priv = kzalloc_flex(*vma_priv, pages, count, GFP_KERNEL);
+ vma_priv = kzalloc_flex(*vma_priv, pages, count);
if (!vma_priv)
return -ENOMEM;
if (len > XENSTORE_PAYLOAD_MAX)
return -EINVAL;
- rb = kmalloc_flex(*rb, msg, len, GFP_KERNEL);
+ rb = kmalloc_flex(*rb, msg, len);
if (rb == NULL)
return -ENOMEM;
int error;
/* Initialize the Zorro bus */
- bus = kzalloc_flex(*bus, devices, zorro_num_autocon, GFP_KERNEL);
+ bus = kzalloc_flex(*bus, devices, zorro_num_autocon);
if (!bus)
return -ENOMEM;
if (nr > AFS_MAX_ADDRESSES)
nr = AFS_MAX_ADDRESSES;
- alist = kzalloc_flex(*alist, addrs, nr, GFP_KERNEL);
+ alist = kzalloc_flex(*alist, addrs, nr);
if (!alist)
return NULL;
max_prefs = min_t(size_t, (psize - sizeof(*old)) / sizeof(old->prefs[0]), 255);
ret = -ENOMEM;
- preflist = kmalloc_flex(*preflist, prefs, max_prefs, GFP_KERNEL);
+ preflist = kmalloc_flex(*preflist, prefs, max_prefs);
if (!preflist)
goto done;
size = call->count2 = ntohl(call->tmp);
size = round_up(size, 4);
- acl = kmalloc_flex(*acl, data, size, GFP_KERNEL);
+ acl = kmalloc_flex(*acl, data, size);
if (!acl)
return -ENOMEM;
op->acl = acl;
newrep++;
}
- slist = kzalloc_flex(*slist, servers, nr_servers, GFP_KERNEL);
+ slist = kzalloc_flex(*slist, servers, nr_servers);
if (!slist)
goto error;
struct afs_vlserver *vlserver;
static atomic_t debug_ids;
- vlserver = kzalloc_flex(*vlserver, name, name_len + 1, GFP_KERNEL);
+ vlserver = kzalloc_flex(*vlserver, name, name_len + 1);
if (vlserver) {
refcount_set(&vlserver->ref, 1);
rwlock_init(&vlserver->lock);
{
struct afs_vlserver_list *vllist;
- vllist = kzalloc_flex(*vllist, servers, nr_servers, GFP_KERNEL);
+ vllist = kzalloc_flex(*vllist, servers, nr_servers);
if (vllist) {
refcount_set(&vllist->ref, 1);
rwlock_init(&vllist->lock);
{
struct afs_acl *acl;
- acl = kmalloc_flex(*acl, data, size, GFP_KERNEL);
+ acl = kmalloc_flex(*acl, data, size);
if (!acl) {
afs_op_nomem(op);
return false;
size = round_up(size, 4);
if (yacl->flags & YFS_ACL_WANT_ACL) {
- acl = kmalloc_flex(*acl, data, size, GFP_KERNEL);
+ acl = kmalloc_flex(*acl, data, size);
if (!acl)
return -ENOMEM;
yacl->acl = acl;
size = round_up(size, 4);
if (yacl->flags & YFS_ACL_WANT_VOL_ACL) {
- acl = kmalloc_flex(*acl, data, size, GFP_KERNEL);
+ acl = kmalloc_flex(*acl, data, size);
if (!acl)
return -ENOMEM;
yacl->vol_acl = acl;
new_nr = (table ? table->nr : 1) * 4;
spin_unlock(&mm->ioctx_lock);
- table = kzalloc_flex(*table, table, new_nr, GFP_KERNEL);
+ table = kzalloc_flex(*table, table, new_nr);
if (!table)
return -ENOMEM;
info->thread->task = dump_task;
for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
- t = kzalloc_flex(*t, notes, info->thread_notes, GFP_KERNEL);
+ t = kzalloc_flex(*t, notes, info->thread_notes);
if (unlikely(!t))
return 0;
if (nloads == 0)
return -ELIBBAD;
- loadmap = kzalloc_flex(*loadmap, segs, nloads, GFP_KERNEL);
+ loadmap = kzalloc_flex(*loadmap, segs, nloads);
if (!loadmap)
return -ENOMEM;
const blk_opf_t write_flags = wbc_to_write_flags(wbc);
nofs_flag = memalloc_nofs_save();
- ctx = kvmalloc_flex(*ctx, chunks, num_chunks, GFP_KERNEL);
+ ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
memalloc_nofs_restore(nofs_flag);
if (!ctx)
return false;
* Try harder to allocate and fallback to vmalloc to lower the chance
* of a failing mount.
*/
- table = kvzalloc_flex(*table, table, num_entries, GFP_KERNEL);
+ table = kvzalloc_flex(*table, table, num_entries);
if (!table)
return -ENOMEM;
p = &info->root.rb_node;
/* Create and allocate the fname structure */
- new_fn = kzalloc_flex(*new_fn, name, ent_name->len + 1, GFP_KERNEL);
+ new_fn = kzalloc_flex(*new_fn, name, ent_name->len + 1);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
int nr;
bool restart = false;
- isw = kzalloc_flex(*isw, inodes, WB_MAX_INODES_PER_ISW, GFP_KERNEL);
+ isw = kzalloc_flex(*isw, inodes, WB_MAX_INODES_PER_ISW);
if (!isw)
return restart;
size_t i;
*size = jffs2_acl_size(acl->a_count);
- header = kmalloc_flex(*header, a_entries, acl->a_count, GFP_KERNEL);
+ header = kmalloc_flex(*header, a_entries, acl->a_count);
if (!header)
return ERR_PTR(-ENOMEM);
header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
if (IS_ERR(cache))
return NULL;
- volume = kzalloc_flex(*volume, coherency, coherency_len, GFP_KERNEL);
+ volume = kzalloc_flex(*volume, coherency, coherency_len);
if (!volume)
goto err_cache;
* layouts, so make sure to zero the whole structure.
*/
nfserr = nfserrno(-ENOMEM);
- bl = kzalloc_flex(*bl, extents, nr_extents_max, GFP_KERNEL);
+ bl = kzalloc_flex(*bl, extents, nr_extents_max);
if (!bl)
goto out_error;
bl->nr_extents = nr_extents_max;
struct pnfs_block_deviceaddr *dev;
struct pnfs_block_volume *b;
- dev = kzalloc_flex(*dev, volumes, 1, GFP_KERNEL);
+ dev = kzalloc_flex(*dev, volumes, 1);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
const struct pr_ops *ops;
int ret;
- dev = kzalloc_flex(*dev, volumes, 1, GFP_KERNEL);
+ dev = kzalloc_flex(*dev, volumes, 1);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
{
struct svcxdr_tmpbuf *tb;
- tb = kmalloc_flex(*tb, buf, len, GFP_KERNEL);
+ tb = kmalloc_flex(*tb, buf, len);
if (!tb)
return NULL;
tb->next = argp->to_free;
osb->recovery_thread_task = NULL;
init_waitqueue_head(&osb->recovery_event);
- rm = kzalloc_flex(*rm, rm_entries, osb->max_slots, GFP_KERNEL);
+ rm = kzalloc_flex(*rm, rm_entries, osb->max_slots);
if (!rm) {
mlog_errno(-ENOMEM);
return -ENOMEM;
struct inode *inode = NULL;
struct ocfs2_slot_info *si;
- si = kzalloc_flex(*si, si_slots, osb->max_slots, GFP_KERNEL);
+ si = kzalloc_flex(*si, si_slots, osb->max_slots);
if (!si) {
status = -ENOMEM;
mlog_errno(status);
{
struct ovl_cache_entry *p;
- p = kmalloc_flex(*p, name, len + 1, GFP_KERNEL);
+ p = kmalloc_flex(*p, name, len + 1);
if (!p)
return NULL;
{
struct ovl_entry *oe;
- oe = kzalloc_flex(*oe, __lowerstack, numlower, GFP_KERNEL);
+ oe = kzalloc_flex(*oe, __lowerstack, numlower);
if (oe)
oe->__numlower = numlower;
goto out;
}
- cc_req = kzalloc_flex(*cc_req, Chunks, chunk_count, GFP_KERNEL);
+ cc_req = kzalloc_flex(*cc_req, Chunks, chunk_count);
if (!cc_req) {
rc = -ENOMEM;
goto out;
struct udf_bitmap *bitmap;
int nr_groups = udf_compute_nr_groups(sb, index);
- bitmap = kvzalloc_flex(*bitmap, s_block_bitmap, nr_groups, GFP_KERNEL);
+ bitmap = kvzalloc_flex(*bitmap, s_block_bitmap, nr_groups);
if (!bitmap)
return NULL;
struct xfs_buftarg *btp;
int error;
- btp = kzalloc_flex(*btp, bt_cache, 1, GFP_KERNEL);
+ btp = kzalloc_flex(*btp, bt_cache, 1);
if (!btp)
return -ENOMEM;
{
struct dir_entry *de;
- de = kmalloc_flex(*de, name, nlen, GFP_KERNEL);
+ de = kmalloc_flex(*de, name, nlen);
if (!de)
panic_show_mem("can't allocate dir_entry buffer");
INIT_LIST_HEAD(&de->list);
{
if (nr_bvecs <= IO_CACHED_BVECS_SEGS)
return io_cache_alloc(&ctx->imu_cache, GFP_KERNEL);
- return kvmalloc_flex(struct io_mapped_ubuf, bvec, nr_bvecs, GFP_KERNEL);
+ return kvmalloc_flex(struct io_mapped_ubuf, bvec, nr_bvecs);
}
static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
if (err < 0)
return err;
}
- sig_data = kmalloc_flex(*sig_data, ctx, lsmctx.len, GFP_KERNEL);
+ sig_data = kmalloc_flex(*sig_data, ctx, lsmctx.len);
if (!sig_data) {
if (lsmprop_is_set(&audit_sig_lsm))
security_release_secctx(&lsmctx);
size_t sz;
sz = strlen(s) + 1;
- tree = kmalloc_flex(*tree, pathname, sz, GFP_KERNEL);
+ tree = kmalloc_flex(*tree, pathname, sz);
if (tree) {
refcount_set(&tree->count, 1);
tree->goner = 0;
struct audit_chunk *chunk;
int i;
- chunk = kzalloc_flex(*chunk, owners, count, GFP_KERNEL);
+ chunk = kzalloc_flex(*chunk, owners, count);
if (!chunk)
return NULL;
void *bufp;
int i;
- data = kzalloc_flex(*data, buf, krule->buflen, GFP_KERNEL);
+ data = kzalloc_flex(*data, buf, krule->buflen);
if (unlikely(!data))
return NULL;
tab = btf->struct_ops_tab;
if (!tab) {
- tab = kzalloc_flex(*tab, ops, 4, GFP_KERNEL);
+ tab = kzalloc_flex(*tab, ops, 4);
if (!tab)
return -ENOMEM;
tab->capacity = 4;
int ret;
/* allocate the cgroup and its ID, 0 is reserved for the root */
- cgrp = kzalloc_flex(*cgrp, _low_ancestors, level, GFP_KERNEL);
+ cgrp = kzalloc_flex(*cgrp, _low_ancestors, level);
if (!cgrp)
return ERR_PTR(-ENOMEM);
/* Dry-run to get the actual buffer size. */
size = convert_to_gcda(NULL, info);
- iter = kvmalloc_flex(*iter, buffer, size, GFP_KERNEL);
+ iter = kvmalloc_flex(*iter, buffer, size);
if (!iter)
return NULL;
{
struct irq_chip_generic *gc;
- gc = kzalloc_flex(*gc, chip_types, num_ct, GFP_KERNEL);
+ gc = kzalloc_flex(*gc, chip_types, num_ct);
if (gc) {
irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
handler);
unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits);
struct irq_matrix *m;
- m = kzalloc_flex(*m, scratch_map, matrix_size * 2, GFP_KERNEL);
+ m = kzalloc_flex(*m, scratch_map, matrix_size * 2);
if (!m)
return NULL;
} while (c->nr_garbage && collect_garbage_slots(c) == 0);
/* All out of space. Need to allocate a new page. */
- kip = kmalloc_flex(*kip, slot_used, slots_per_page(c), GFP_KERNEL);
+ kip = kmalloc_flex(*kip, slot_used, slots_per_page(c));
if (!kip)
return NULL;
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]))
nloaded++;
- sect_attrs = kzalloc_flex(*sect_attrs, attrs, nloaded, GFP_KERNEL);
+ sect_attrs = kzalloc_flex(*sect_attrs, attrs, nloaded);
if (!sect_attrs)
return -ENOMEM;
if (notes == 0)
return 0;
- notes_attrs = kzalloc_flex(*notes_attrs, attrs, notes, GFP_KERNEL);
+ notes_attrs = kzalloc_flex(*notes_attrs, attrs, notes);
if (!notes_attrs)
return -ENOMEM;
return -E2BIG;
fp->entry_data_size = size;
- hlist_array = kzalloc_flex(*hlist_array, array, num, GFP_KERNEL);
+ hlist_array = kzalloc_flex(*hlist_array, array, num);
if (!hlist_array)
return -ENOMEM;
sys_name = event->class->system;
event_name = trace_event_name(event);
- ep = kzalloc_flex(*ep, tp.args, nargs, GFP_KERNEL);
+ ep = kzalloc_flex(*ep, tp.args, nargs);
if (!ep) {
trace_event_put_ref(event);
return ERR_PTR(-ENOMEM);
struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
int ret = -ENOMEM;
- tf = kzalloc_flex(*tf, tp.args, nargs, GFP_KERNEL);
+ tf = kzalloc_flex(*tf, tp.args, nargs);
if (!tf)
return ERR_PTR(ret);
struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
int ret = -ENOMEM;
- tk = kzalloc_flex(*tk, tp.args, nargs, GFP_KERNEL);
+ tk = kzalloc_flex(*tk, tp.args, nargs);
if (!tk)
return ERR_PTR(ret);
struct trace_uprobe *tu;
int ret;
- tu = kzalloc_flex(*tu, tp.args, nargs, GFP_KERNEL);
+ tu = kzalloc_flex(*tu, tp.args, nargs);
if (!tu)
return ERR_PTR(-ENOMEM);
static inline void *allocate_probes(int count)
{
- struct tp_probes *p = kmalloc_flex(*p, probes, count, GFP_KERNEL);
+ struct tp_probes *p = kmalloc_flex(*p, probes, count);
return p == NULL ? NULL : p->probes;
}
* user-specified filters.
*/
ret = -ENOMEM;
- wfilter = kzalloc_flex(*wfilter, filters, nr_filter, GFP_KERNEL);
+ wfilter = kzalloc_flex(*wfilter, filters, nr_filter);
if (!wfilter)
goto err_filter;
wfilter->nr_filters = nr_filter;
attrs->affn_scope >= WQ_AFFN_NR_TYPES))
return ERR_PTR(-EINVAL);
- ctx = kzalloc_flex(*ctx, pwq_tbl, nr_cpu_ids, GFP_KERNEL);
+ ctx = kzalloc_flex(*ctx, pwq_tbl, nr_cpu_ids);
new_attrs = alloc_workqueue_attrs();
if (!ctx || !new_attrs)
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL);
+ new_s0 = kzalloc_flex(*new_s0, index_key, keylen);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL);
+ new_s0 = kzalloc_flex(*new_s0, index_key, keylen);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s1 = kzalloc_flex(*new_s1, index_key, keylen, GFP_KERNEL);
+ new_s1 = kzalloc_flex(*new_s1, index_key, keylen);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s = kmalloc_flex(*new_s, index_key, keylen, GFP_KERNEL);
+ new_s = kmalloc_flex(*new_s, index_key, keylen);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
int which, err;
/* Allocate the table of programs to be used for tail calls */
- progs = kzalloc_flex(*progs, ptrs, ntests + 1, GFP_KERNEL);
+ progs = kzalloc_flex(*progs, ptrs, ntests + 1);
if (!progs)
goto out_nomem;
struct hugetlb_cgroup *h_cgroup;
int node;
- h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids, GFP_KERNEL);
+ h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids);
if (!h_cgroup)
return ERR_PTR(-ENOMEM);
/* Add 1 for NUL terminator at the end of the anon_name->name */
count = strlen(name) + 1;
- anon_name = kmalloc_flex(*anon_name, name, count, GFP_KERNEL);
+ anon_name = kmalloc_flex(*anon_name, name, count);
if (anon_name) {
kref_init(&anon_name->kref);
memcpy(anon_name->name, name, count);
{
int nid, err;
- wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids, GFP_KERNEL);
+ wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids);
if (!wi_group)
return -ENOMEM;
mutex_init(&wi_group->kobj_lock);
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
return -EINVAL;
- dl = kzalloc_flex(*dl, dev_req, dev_num, GFP_KERNEL);
+ dl = kzalloc_flex(*dl, dev_req, dev_num);
if (!dl)
return -ENOMEM;
i++;
}
- rp = kmalloc_flex(*rp, addr, i, GFP_KERNEL);
+ rp = kmalloc_flex(*rp, addr, i);
if (!rp) {
err = -ENOMEM;
goto unlock;
if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di))
return -EINVAL;
- dl = kzalloc_flex(*dl, dev_info, dev_num, GFP_KERNEL);
+ dl = kzalloc_flex(*dl, dev_info, dev_num);
if (!dl)
return -ENOMEM;
nr_maps++;
}
- diag = kzalloc_flex(*diag, maps, nr_maps, GFP_KERNEL);
+ diag = kzalloc_flex(*diag, maps, nr_maps);
if (!diag)
return ERR_PTR(-ENOMEM);
static struct flush_backlogs *flush_backlogs_alloc(void)
{
- return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids, GFP_KERNEL);
+ return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids);
}
static struct flush_backlogs *flush_backlogs_fallback;
struct flow_rule *rule;
int i;
- rule = kzalloc_flex(*rule, action.entries, num_actions, GFP_KERNEL);
+ rule = kzalloc_flex(*rule, action.entries, num_actions);
if (!rule)
return NULL;
if (!devlink_reload_actions_valid(ops))
return NULL;
- devlink = kvzalloc_flex(*devlink, priv, priv_size, GFP_KERNEL);
+ devlink = kvzalloc_flex(*devlink, priv, priv_size);
if (!devlink)
return NULL;
if (rule_cnt <= 0)
return -EINVAL;
- info = kvzalloc_flex(*info, rule_locs, rule_cnt, GFP_KERNEL);
+ info = kvzalloc_flex(*info, rule_locs, rule_cnt);
if (!info)
return -ENOMEM;
if (rule_cnt < 0)
return -EINVAL;
- info = kvzalloc_flex(*info, rule_locs, rule_cnt, GFP_KERNEL);
+ info = kvzalloc_flex(*info, rule_locs, rule_cnt);
if (!info)
return -ENOMEM;
fib_info_hash_grow(net);
- fi = kzalloc_flex(*fi, fib_nh, nhs, GFP_KERNEL);
+ fi = kzalloc_flex(*fi, fib_nh, nhs);
if (!fi) {
err = -ENOBUFS;
goto failure;
{
struct nh_group *nhg;
- nhg = kzalloc_flex(*nhg, nh_entries, num_nh, GFP_KERNEL);
+ nhg = kzalloc_flex(*nhg, nh_entries, num_nh);
if (nhg)
nhg->num_nh = num_nh;
struct udp_tunnel_nic *utn;
unsigned int i;
- utn = kzalloc_flex(*utn, entries, n_tables, GFP_KERNEL);
+ utn = kzalloc_flex(*utn, entries, n_tables);
if (!utn)
return NULL;
utn->n_tables = n_tables;
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
- m = kmalloc_flex(*m, f, field_count, GFP_KERNEL);
+ m = kmalloc_flex(*m, f, field_count);
if (!m)
return -ENOMEM;
if (size < 16)
size = 16;
}
- hinfo = kvmalloc_flex(*hinfo, hash, size, GFP_KERNEL);
+ hinfo = kvmalloc_flex(*hinfo, hash, size);
if (hinfo == NULL)
return -ENOMEM;
*out_hinfo = hinfo;
goto out;
}
- t = kvzalloc_flex(*t, iphash, ip_list_hash_size, GFP_KERNEL);
+ t = kvzalloc_flex(*t, iphash, ip_list_hash_size);
if (t == NULL) {
ret = -ENOMEM;
goto out;
{
struct dp_meter_instance *ti;
- ti = kvzalloc_flex(*ti, dp_meters, size, GFP_KERNEL);
+ ti = kvzalloc_flex(*ti, dp_meters, size);
if (!ti)
return NULL;
void *key = tc_u_common_ptr(tp);
struct tc_u_common *tp_c = tc_u_common_find(key);
- root_ht = kzalloc_flex(*root_ht, ht, 1, GFP_KERNEL);
+ root_ht = kzalloc_flex(*root_ht, ht, 1);
if (root_ht == NULL)
return -ENOBUFS;
struct tc_u32_sel *s = &n->sel;
struct tc_u_knode *new;
- new = kzalloc_flex(*new, sel.keys, s->nkeys, GFP_KERNEL);
+ new = kzalloc_flex(*new, sel.keys, s->nkeys);
if (!new)
return NULL;
NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
return -EINVAL;
}
- ht = kzalloc_flex(*ht, ht, divisor + 1, GFP_KERNEL);
+ ht = kzalloc_flex(*ht, ht, divisor + 1);
if (ht == NULL)
return -ENOBUFS;
if (handle == 0) {
goto erridr;
}
- n = kzalloc_flex(*n, sel.keys, s->nkeys, GFP_KERNEL);
+ n = kzalloc_flex(*n, sel.keys, s->nkeys);
if (n == NULL) {
err = -ENOBUFS;
goto erridr;
goto nla_put_failure;
}
#ifdef CONFIG_CLS_U32_PERF
- gpf = kzalloc_flex(*gpf, kcnts, n->sel.nkeys, GFP_KERNEL);
+ gpf = kzalloc_flex(*gpf, kcnts, n->sel.nkeys);
if (!gpf)
goto nla_put_failure;
return ERR_PTR(-EINVAL);
}
- stab = kmalloc_flex(*stab, data, tsize, GFP_KERNEL);
+ stab = kmalloc_flex(*stab, data, tsize);
if (!stab)
return ERR_PTR(-ENOMEM);
if (!n || n > NETEM_DIST_MAX)
return -EINVAL;
- d = kvmalloc_flex(*d, table, n, GFP_KERNEL);
+ d = kvmalloc_flex(*d, table, n);
if (!d)
return -ENOMEM;
return ERR_PTR(sendpages);
pages = svc_serv_maxpages(serv);
- svsk = kzalloc_flex(*svsk, sk_pages, pages, GFP_KERNEL);
+ svsk = kzalloc_flex(*svsk, sk_pages, pages);
if (!svsk)
return ERR_PTR(-ENOMEM);
{
struct svc_rdma_chunk *chunk;
- chunk = kmalloc_flex(*chunk, ch_segments, segcount, GFP_KERNEL);
+ chunk = kmalloc_flex(*chunk, ch_segments, segcount);
if (!chunk)
return NULL;
if (n_entries > wiphy->max_acl_mac_addrs)
return ERR_PTR(-EOPNOTSUPP);
- acl = kzalloc_flex(*acl, mac_addrs, n_entries, GFP_KERNEL);
+ acl = kzalloc_flex(*acl, mac_addrs, n_entries);
if (!acl)
return ERR_PTR(-ENOMEM);
acl->n_acl_entries = n_entries;
num_elems++;
}
- elems = kzalloc_flex(*elems, elem, num_elems, GFP_KERNEL);
+ elems = kzalloc_flex(*elems, elem, num_elems);
if (!elems)
return ERR_PTR(-ENOMEM);
elems->cnt = num_elems;
num_elems++;
}
- elems = kzalloc_flex(*elems, elem, num_elems, GFP_KERNEL);
+ elems = kzalloc_flex(*elems, elem, num_elems);
if (!elems)
return ERR_PTR(-ENOMEM);
elems->cnt = num_elems;
goto out;
}
- rd = kzalloc_flex(*rd, reg_rules, num_rules, GFP_KERNEL);
+ rd = kzalloc_flex(*rd, reg_rules, num_rules);
if (!rd) {
r = -ENOMEM;
goto out;
if (n_rules > coalesce->n_rules)
return -EINVAL;
- new_coalesce = kzalloc_flex(*new_coalesce, rules, n_rules, GFP_KERNEL);
+ new_coalesce = kzalloc_flex(*new_coalesce, rules, n_rules);
if (!new_coalesce)
return -ENOMEM;
rem_conf)
num_conf++;
- tid_config = kzalloc_flex(*tid_config, tid_conf, num_conf, GFP_KERNEL);
+ tid_config = kzalloc_flex(*tid_config, tid_conf, num_conf);
if (!tid_config)
return -ENOMEM;
if (specs > rdev->wiphy.sar_capa->num_freq_ranges)
return -EINVAL;
- sar_spec = kzalloc_flex(*sar_spec, sub_specs, specs, GFP_KERNEL);
+ sar_spec = kzalloc_flex(*sar_spec, sub_specs, specs);
if (!sar_spec)
return -ENOMEM;
}
}
- req = kzalloc_flex(*req, peers, count, GFP_KERNEL);
+ req = kzalloc_flex(*req, peers, count);
if (!req)
return -ENOMEM;
req->n_peers = count;
struct ieee80211_regdomain *regd;
unsigned int i;
- regd = kzalloc_flex(*regd, reg_rules, src_regd->n_reg_rules, GFP_KERNEL);
+ regd = kzalloc_flex(*regd, reg_rules, src_regd->n_reg_rules);
if (!regd)
return ERR_PTR(-ENOMEM);
struct ieee80211_regdomain *regdom;
unsigned int i;
- regdom = kzalloc_flex(*regdom, reg_rules, coll->n_rules, GFP_KERNEL);
+ regdom = kzalloc_flex(*regdom, reg_rules, coll->n_rules);
if (!regdom)
return -ENOMEM;
if (!num_rules)
return NULL;
- rd = kzalloc_flex(*rd, reg_rules, num_rules, GFP_KERNEL);
+ rd = kzalloc_flex(*rd, reg_rules, num_rules);
if (!rd)
return NULL;
if (!n_channels)
return cfg80211_scan_6ghz(rdev, true);
- request = kzalloc_flex(*request, req.channels, n_channels, GFP_KERNEL);
+ request = kzalloc_flex(*request, req.channels, n_channels);
if (!request)
return -ENOMEM;
u32 i, entries;
entries = unaligned ? umem->chunks : 0;
- pool = kvzalloc_flex(*pool, free_heads, entries, GFP_KERNEL);
+ pool = kvzalloc_flex(*pool, free_heads, entries);
if (!pool)
goto out;
buf_len -= sig_len + sizeof(*sig);
/* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
- hdr = kzalloc_flex(*hdr, raw_pkcs7, sig_len, GFP_KERNEL);
+ hdr = kzalloc_flex(*hdr, raw_pkcs7, sig_len);
if (!hdr)
return -ENOMEM;
return ERR_PTR(-EINVAL);
}
- opt_list = kzalloc_flex(*opt_list, items, count, GFP_KERNEL);
+ opt_list = kzalloc_flex(*opt_list, items, count);
if (!opt_list) {
kfree(src_copy);
return ERR_PTR(-ENOMEM);
* caller.
*/
details =
- kzalloc_flex(*details, exe_path, path_size, GFP_KERNEL);
+ kzalloc_flex(*details, exe_path, path_size);
if (!details)
return ERR_PTR(-ENOMEM);
len /= 2;
- trd = kzalloc_flex(*trd, data, len, GFP_KERNEL);
+ trd = kzalloc_flex(*trd, data, len);
if (!trd) {
rc = -ENOMEM;
goto err;
if (count == 0 || count > MAX_CONTROL_COUNT)
return -EINVAL;
- *kctl = kzalloc_flex(**kctl, vd, count, GFP_KERNEL);
+ *kctl = kzalloc_flex(**kctl, vd, count);
if (!*kctl)
return -ENOMEM;
struct link_master *master_link = snd_kcontrol_chip(master);
struct link_follower *srec;
- srec = kzalloc_flex(*srec, follower.vd, follower->count, GFP_KERNEL);
+ srec = kzalloc_flex(*srec, follower.vd, follower->count);
if (!srec)
return -ENOMEM;
srec->kctl = follower;
{
struct hda_conn_list *p;
- p = kmalloc_flex(*p, conns, len, GFP_KERNEL);
+ p = kmalloc_flex(*p, conns, len);
if (!p)
return -ENOMEM;
p->len = len;
length -= sizeof(*data_chunk);
- data = kzalloc_flex(*data, data, length, GFP_KERNEL);
+ data = kzalloc_flex(*data, data, length);
if (!data)
return -ENOMEM;
if (len < 3)
return -EINVAL;
- data = kzalloc_flex(*data, data, size_sub(len, 2), GFP_KERNEL);
+ data = kzalloc_flex(*data, data, size_sub(len, 2));
if (!data)
return -ENOMEM;
list_for_each(it, widgets)
size++;
- *list = kzalloc_flex(**list, widgets, size, GFP_KERNEL);
+ *list = kzalloc_flex(**list, widgets, size);
if (*list == NULL)
return -ENOMEM;
break;
}
- xoops = kzalloc_flex(*xoops, ar, XTENSA_CORE_AR_REGS_COUNT, GFP_KERNEL);
+ xoops = kzalloc_flex(*xoops, ar, XTENSA_CORE_AR_REGS_COUNT);
if (!xoops)
goto free_block;
struct urb *urb;
if (usx2y->rate != rate) {
- us = kzalloc_flex(*us, urb, NOOF_SETRATE_URBS, GFP_KERNEL);
+ us = kzalloc_flex(*us, urb, NOOF_SETRATE_URBS);
if (!us) {
err = -ENOMEM;
goto cleanup;
int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
struct virtio_pcm_msg *msg;
- msg = kzalloc_flex(*msg, sgs, sg_num + 2, GFP_KERNEL);
+ msg = kzalloc_flex(*msg, sgs, sg_num + 2);
if (!msg)
return -ENOMEM;