	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
-	size_t *unc_len[CMP_THREADS];             /* uncompressed lengths */
-	unsigned char *unc[CMP_THREADS];          /* uncompressed data */
+	size_t **unc_len;                         /* uncompressed lengths */
+	unsigned char **unc;                      /* uncompressed data */
};
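+
+/*
+ * Allocate a crc_data structure along with the per-thread arrays of
+ * uncompressed-buffer pointers and lengths, sized for nr_threads.
+ * Returns NULL on failure, after freeing any partial allocations.
+ */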
+static struct crc_data *alloc_crc_data(int nr_threads)
+{
+	struct crc_data *crc;
+
+	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	if (!crc)
+		return NULL;
+
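+	/* One uncompressed-data buffer pointer per compression thread */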
+	crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
+	if (!crc->unc)
+		goto err_free_crc;
+
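+	/* Matching per-thread array of uncompressed lengths */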
+	crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
+	if (!crc->unc_len)
+		goto err_free_unc;
+
+	return crc;
+
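+/* Error unwind: free in reverse order of allocation */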
+err_free_unc:
+	kfree(crc->unc);
+err_free_crc:
+	kfree(crc);
+	return NULL;
+}
+
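+/*
+ * Stop the CRC thread if it was started, then free the per-thread
+ * arrays and the crc_data structure itself.  Safe to call with NULL.
+ */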
+static void free_crc_data(struct crc_data *crc)
+{
+	if (!crc)
+		return;
+
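+	/* The CRC thread still references the arrays below, so stop it first */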
+	if (crc->thr)
+		kthread_stop(crc->thr);
+
+	kfree(crc->unc_len);
+	kfree(crc->unc);
+	kfree(crc);
+}
+
/*
 * CRC32 update function that runs in its own thread.
 */
		goto out_clean;
	}
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
out_clean:
	hib_finish_batch(&hb);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
	if (data) {
		for (thr = 0; thr < nr_threads; thr++) {
			if (data[thr].thr)
		goto out_clean;
	}
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
	if (data) {
		for (thr = 0; thr < nr_threads; thr++) {
			if (data[thr].thr)