                if (fio->bufs[n])
                        continue;

-                 fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT);
+                 fio->bufs[n] = kmem_cache_alloc(v->fec->cache, GFP_NOWAIT);
                /* we can manage with even one buffer if necessary */
                if (unlikely(!fio->bufs[n]))
                        break;
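The extra buffers are strictly opportunistic (the code copes with as few as one), so on the allocation side a plain kmem_cache_alloc() with GFP_NOWAIT that is simply allowed to fail is sufficient. Below is a minimal stand-alone sketch of that pattern; demo_cache, demo_bufs and DEMO_EXTRA_BUFS are hypothetical names, not the dm-verity identifiers:

#include <linux/slab.h>

#define DEMO_EXTRA_BUFS 4

static struct kmem_cache *demo_cache;
static void *demo_bufs[DEMO_EXTRA_BUFS];

/* Grab as many extra buffers as GFP_NOWAIT allows; failure is tolerated. */
static void demo_alloc_extra_bufs(void)
{
        unsigned int n;

        for (n = 0; n < DEMO_EXTRA_BUFS; n++) {
                if (demo_bufs[n])
                        continue;

                demo_bufs[n] = kmem_cache_alloc(demo_cache, GFP_NOWAIT);
                if (!demo_bufs[n])
                        break;  /* decoding can proceed with fewer buffers */
        }
}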
        fec_for_each_prealloc_buffer(n)
                mempool_free(fio->bufs[n], &f->prealloc_pool);

        fec_for_each_extra_buffer(fio, n)
-                 mempool_free(fio->bufs[n], &f->extra_pool);
+                 if (fio->bufs[n])
+                         kmem_cache_free(f->cache, fio->bufs[n]);

        mempool_free(fio->output, &f->output_pool);
}
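On the free side the conversion needs the added NULL check: mempool_free() silently ignores a NULL element, whereas kmem_cache_free() makes no such promise, and every slot past the first failed GFP_NOWAIT allocation stays empty. A matching sketch, continuing the hypothetical demo_* names from above:

/* Release only the extra buffers that were actually obtained. */
static void demo_free_extra_bufs(void)
{
        unsigned int n;

        for (n = 0; n < DEMO_EXTRA_BUFS; n++) {
                if (demo_bufs[n]) {
                        kmem_cache_free(demo_cache, demo_bufs[n]);
                        demo_bufs[n] = NULL;
                }
        }
}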
        mempool_exit(&f->rs_pool);
        mempool_exit(&f->prealloc_pool);
-         mempool_exit(&f->extra_pool);
        mempool_exit(&f->output_pool);
        kmem_cache_destroy(f->cache);

                return ret;
        }
-         ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache);
-         if (ret) {
-                 ti->error = "Cannot allocate FEC buffer extra pool";
-                 return ret;
-         }
-
        /* Preallocate an output buffer for each thread */
        ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(),
                                        1 << v->data_dev_block_bits);
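The constructor-side removal is what makes the whole conversion possible: the extra pool was created with a min_nr of 0, so it held no reserved elements, and it was only ever drawn from with GFP_NOWAIT, a mask that does not allow mempool_alloc() to wait for an element to be returned. Under those conditions the pool can fail exactly as often as the cache behind it, which is presumably why it can be dropped. A separate, self-contained sketch of that equivalence (zr_cache and zr_pool are hypothetical names):

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *zr_cache;
static mempool_t zr_pool;

static int zr_demo(void)
{
        void *from_pool, *from_cache;
        int ret;

        zr_cache = kmem_cache_create("zr_buf", 512, 0, 0, NULL);
        if (!zr_cache)
                return -ENOMEM;

        /* min_nr == 0: the pool keeps no reserved elements at all */
        ret = mempool_init_slab_pool(&zr_pool, 0, zr_cache);
        if (ret) {
                kmem_cache_destroy(zr_cache);
                return ret;
        }

        /*
         * With an empty reserve and a non-sleeping mask, both calls can
         * fail in exactly the same situations; the pool adds no guarantee.
         */
        from_pool = mempool_alloc(&zr_pool, GFP_NOWAIT);
        from_cache = kmem_cache_alloc(zr_cache, GFP_NOWAIT);

        mempool_free(from_pool, &zr_pool);      /* NULL-safe */
        if (from_cache)
                kmem_cache_free(zr_cache, from_cache);

        mempool_exit(&zr_pool);
        kmem_cache_destroy(zr_cache);
        return 0;
}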
        unsigned char rsn;              /* N of RS(M, N) */
        mempool_t rs_pool;              /* mempool for fio->rs */
        mempool_t prealloc_pool;        /* mempool for preallocated buffers */
-         mempool_t extra_pool;         /* mempool for extra buffers */
        mempool_t output_pool;          /* mempool for output */
        struct kmem_cache *cache;       /* cache for buffers */
        atomic64_t corrected;           /* corrected errors */