struct eb_batch {
unsigned int nr;
unsigned int cur;
- struct extent_buffer *ebs[PAGEVEC_SIZE];
+ struct extent_buffer *ebs[FOLIO_BATCH_SIZE];
};
static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb)
{
batch->ebs[batch->nr++] = eb;
- return (batch->nr < PAGEVEC_SIZE);
+ return (batch->nr < FOLIO_BATCH_SIZE);
}
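
The bool returned by eb_batch_add() tells the caller whether the batch still has free slots after the add, so a gather loop can stop exactly when all FOLIO_BATCH_SIZE entries are used. A minimal sketch of that pattern, assuming a hypothetical lookup_next_eb() helper standing in for whatever lookup the caller performs:

	/* Sketch only: lookup_next_eb() is a hypothetical lookup helper. */
	struct extent_buffer *lookup_next_eb(void);

	static void gather_ebs(struct eb_batch *batch)
	{
		struct extent_buffer *eb;

		eb_batch_init(batch);
		while ((eb = lookup_next_eb()) != NULL) {
			/* false once all FOLIO_BATCH_SIZE slots are taken */
			if (!eb_batch_add(batch, eb))
				break;
		}
	}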
static inline void eb_batch_init(struct eb_batch *batch)
#include <linux/types.h>
/* 31 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 31
+#define FOLIO_BATCH_SIZE 31
struct folio;
unsigned char nr;
unsigned char i;
bool percpu_pvec_drained;
- struct folio *folios[PAGEVEC_SIZE];
+ struct folio *folios[FOLIO_BATCH_SIZE];
};
/**
static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
- return PAGEVEC_SIZE - fbatch->nr;
+ return FOLIO_BATCH_SIZE - fbatch->nr;
}
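
folio_batch_add() stores the folio and then returns folio_batch_space(), so a zero return is the cue to drain the batch. A sketch of the usual fill-and-drain step, with a hypothetical process_batch() standing in for the real consumer:

	/* Sketch only: process_batch() is a hypothetical consumer. */
	static void batch_one_folio(struct folio_batch *fbatch, struct folio *folio)
	{
		/* zero slots left after the add means the batch is full */
		if (folio_batch_add(fbatch, folio) == 0) {
			process_batch(fbatch);
			folio_batch_reinit(fbatch);
		}
	}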
/**
*/
struct folio_queue {
struct folio_batch vec; /* Folios in the queue segment */
- u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ u8 orders[FOLIO_BATCH_SIZE]; /* Order of each folio */
struct folio_queue *next; /* Next queue segment or NULL */
struct folio_queue *prev; /* Previous queue segment or NULL */
unsigned long marks; /* 1-bit mark per folio */
unsigned long marks2; /* Second 1-bit mark per folio */
-#if PAGEVEC_SIZE > BITS_PER_LONG
+#if FOLIO_BATCH_SIZE > BITS_PER_LONG
#error marks is not big enough
#endif
unsigned int rreq_id;
*/
static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
{
- return PAGEVEC_SIZE;
+ return FOLIO_BATCH_SIZE;
}
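
With FOLIO_BATCH_SIZE capped at BITS_PER_LONG by the #if above, each unsigned long marks field carries exactly one bit per slot. A sketch of walking a segment's occupied slots and testing the first mark, assuming a hypothetical handle_marked() callback:

	/* Sketch only: handle_marked() is a hypothetical per-folio callback. */
	static void scan_segment(struct folio_queue *folioq)
	{
		unsigned int slot;

		for (slot = 0; slot < folioq_count(folioq); slot++)
			if (folioq_is_marked(folioq, slot))
				handle_marked(folioq_folio(folioq, slot));
	}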
/**
pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_SHIFT;
struct folio_batch fbatch;
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t indices[FOLIO_BATCH_SIZE];
struct folio *folio;
bool same_folio;
long nr_swaps_freed = 0;
struct address_space *mapping = inode->i_mapping;
pgoff_t start = 0;
struct folio_batch fbatch;
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t indices[FOLIO_BATCH_SIZE];
int ret = 0;
do {
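
In these loops the indices[] array shadows the folio batch one-to-one, recording the index of each entry because value entries (swap or shadow) carry no index of their own; that is why both arrays are sized FOLIO_BATCH_SIZE. A sketch of the pattern, assuming a hypothetical process_entry() and glossing over the value-entry handling real callers must do before releasing the batch:

	/* Sketch only: process_entry() is hypothetical, and real callers must
	 * strip xarray value entries (swap/shadow) before releasing the batch. */
	static void walk_entries(struct address_space *mapping, pgoff_t start,
				 pgoff_t end)
	{
		pgoff_t indices[FOLIO_BATCH_SIZE];
		struct folio_batch fbatch;
		unsigned int i;

		folio_batch_init(&fbatch);
		while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++)
				process_entry(fbatch.folios[i], indices[i]);
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}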
void release_pages(release_pages_arg arg, int nr)
{
struct folio_batch fbatch;
- int refs[PAGEVEC_SIZE];
+ int refs[FOLIO_BATCH_SIZE];
struct encoded_page **encoded = arg.encoded_pages;
int i;
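
Here the refs[] side array runs in lockstep with the batch, one reference count per slot, so folios_put_refs() can drop several references per folio in a single pass; that lockstep is why refs[] must also be sized FOLIO_BATCH_SIZE. A sketch of the pattern, assuming folios_put_refs() as declared in linux/mm.h:

	/* Sketch only: refs[] stays parallel to the batch, slot for slot. */
	static void put_batched(struct folio **folios, unsigned int *counts, int nr)
	{
		struct folio_batch fbatch;
		unsigned int refs[FOLIO_BATCH_SIZE];
		int i;

		folio_batch_init(&fbatch);
		for (i = 0; i < nr; i++) {
			refs[folio_batch_count(&fbatch)] = counts[i];
			if (folio_batch_add(&fbatch, folios[i]) == 0) {
				folios_put_refs(&fbatch, refs);
				folio_batch_reinit(&fbatch);
			}
		}
		if (folio_batch_count(&fbatch))
			folios_put_refs(&fbatch, refs);
	}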
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
struct folio_batch folios;
- unsigned int refs[PAGEVEC_SIZE];
+ unsigned int refs[FOLIO_BATCH_SIZE];
folio_batch_init(&folios);
for (int i = 0; i < nr; i++) {
pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */
struct folio_batch fbatch;
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t indices[FOLIO_BATCH_SIZE];
pgoff_t index;
int i;
struct folio *folio;
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t indices[FOLIO_BATCH_SIZE];
struct folio_batch fbatch;
pgoff_t index = start;
unsigned long ret;
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t indices[FOLIO_BATCH_SIZE];
struct folio_batch fbatch;
pgoff_t index;
int i;