Provide and use folio_queue allocation and free functions to combine the
allocation, initialisation and stat (un)accounting steps that are repeated
in several places.
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-4-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
struct folio_queue *tail = rreq->buffer_tail, *new;
size_t added;
- new = kmalloc(sizeof(*new), GFP_NOFS);
+ new = netfs_folioq_alloc(GFP_NOFS);
if (!new)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(new);
new->prev = tail;
tail->next = new;
rreq->buffer_tail = new;
struct folio_batch put_batch;
size_t added;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
rreq->buffer = folioq;
rreq->buffer_tail = folioq;
rreq->submitted = rreq->start;
{
struct folio_queue *folioq;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
folioq_append(folioq, folio);
BUG_ON(folioq_folio(folioq, 0) != folio);
BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
#include <linux/swap.h>
#include "internal.h"
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @gfp: Allocation constraints
+ *
+ * Allocate, initialise and account the folio_queue struct.
+ *
+ * Return: Pointer to the newly allocated and initialised folio_queue struct
+ * on success, or NULL if the allocation failed.
+ */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ *
+ * Free and unaccount the folio_queue struct.
+ *
+ * Note: the stat decrement is unconditional, so unlike kfree() this must
+ * not be passed a NULL pointer or the netfs_n_folioq accounting will be
+ * unbalanced.
+ */
+void netfs_folioq_free(struct folio_queue *folioq)
+{
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
/*
* Make sure there's space in the rolling queue.
*/
if (next)
next->prev = NULL;
- netfs_stat_d(&netfs_n_folioq);
- kfree(head);
+ netfs_folioq_free(head);
wreq->buffer = next;
return next;
}
folio_put(folio);
}
}
- netfs_stat_d(&netfs_n_folioq);
- kfree(p);
+ netfs_folioq_free(p);
}
}
enum netfs_sreq_ref_trace;
typedef struct mempool_s mempool_t;
+struct folio_queue;
/**
* folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp);
+void netfs_folioq_free(struct folio_queue *folioq);
+
/**
* netfs_inode - Get the netfs inode context from the inode
* @inode: The inode to query