/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void __user *uaddr;	/* user space address pointer */
-	struct page **page_array;	/* array of mapped pages */
-	int array_count;	/* size of above arrays */
-	struct list_head list_link;
+	struct folio **folio_array;	/* folios backing this desc, in order */
+	/*
+	 * folio_offsets could be needed when userspace sets custom
+	 * sizes in user_desc, or when folios aren't all backed by
+	 * 2MB THPs.  folio_offsets[k] is the byte offset into
+	 * folio_array[k] where this desc's data begins.
+	 */
+	size_t *folio_offsets;
+	int folio_count;	/* valid entries in the two arrays above */
+	bool is_two_2mib_chunks;	/* desc is exactly two 512-page folios; enables copy fast path */
};
static struct orangefs_bufmap {
	int desc_count;
	int total_size;
	int page_count;
+	int folio_count;	/* folios found in folio_array after grouping */
	struct page **page_array;
+	struct folio **folio_array;	/* pinned pages collapsed into folios */
	struct orangefs_bufmap_desc *desc_array;
	/* array to track usage of buffer descriptors */
static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
+	int i;
+
+	if (!bufmap)
+		return;
+
+	/*
+	 * Release per-descriptor folio bookkeeping.  kfree(NULL) is a
+	 * no-op, so partially-built descriptors are handled too; the
+	 * pointers are NULLed so a repeat call (or an earlier rollback
+	 * in the map path) cannot double free.
+	 */
+	for (i = 0; i < bufmap->desc_count; i++) {
+		kfree(bufmap->desc_array[i].folio_array);
+		kfree(bufmap->desc_array[i].folio_offsets);
+		bufmap->desc_array[i].folio_array = NULL;
+		bufmap->desc_array[i].folio_offsets = NULL;
+	}
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	bitmap_free(bufmap->buffer_index_array);
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);
+	bufmap->page_count = bufmap->total_size / PAGE_SIZE;
-	bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
+	bufmap->buffer_index_array =
+		bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
	if (!bufmap->buffer_index_array)
		goto out_free_bufmap;
	if (!bufmap->desc_array)
		goto out_free_index_array;
-	bufmap->page_count = bufmap->total_size / PAGE_SIZE;
-
	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kzalloc_objs(struct page *, bufmap->page_count);
	if (!bufmap->page_array)
		goto out_free_desc_array;
+	/*
+	 * Allocate the folio array.  Sized for the worst case of one
+	 * page per folio; grouping later fills it and records how many
+	 * entries are actually used in bufmap->folio_count.
+	 */
+	bufmap->folio_array = kzalloc_objs(struct folio *, bufmap->page_count);
+	if (!bufmap->folio_array)
+		goto out_free_page_array;
+
	return bufmap;
+out_free_page_array:
+	kfree(bufmap->page_array);
out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	return NULL;
}
-static int
-orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
-		struct ORANGEFS_dev_map_desc *user_desc)
+/*
+ * Collapse the pinned page_array into bufmap->folio_array by walking
+ * each folio's pages and verifying they appear consecutively in
+ * page_array.
+ *
+ * Sets bufmap->folio_count on success and returns 0, or -EINVAL if a
+ * folio's pages are not contiguous in the pinned array (shouldn't
+ * happen for pages pinned in address order).
+ */
+static int orangefs_bufmap_group_folios(struct orangefs_bufmap *bufmap)
+{
+	int i = 0;
+	int f = 0;
+	int k;
+	int num_pages;
+	struct page *page;
+	struct folio *folio;
+
+	while (i < bufmap->page_count) {
+		page = bufmap->page_array[i];
+		folio = page_folio(page);
+		num_pages = folio_nr_pages(folio);
+		gossip_debug(GOSSIP_BUFMAP_DEBUG,
+			"%s: i:%d: num_pages:%d:\n", __func__, i, num_pages);
+
+		/* every remaining page of this folio must follow in order */
+		for (k = 1; k < num_pages; k++) {
+			if (bufmap->page_array[i + k] != folio_page(folio, k)) {
+				gossip_err("%s: bad match, i:%d: k:%d:\n",
+					__func__, i, k);
+				return -EINVAL;
+			}
+		}
+
+		bufmap->folio_array[f++] = folio;
+		i += num_pages;
+	}
+
+	bufmap->folio_count = f;
+	gossip_debug(GOSSIP_BUFMAP_DEBUG,
+		"%s: Grouped %d folios from %d pages.\n",
+		__func__,
+		bufmap->folio_count,
+		bufmap->page_count);
+	return 0;
+}
+
+static int orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
+		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
-	int offset = 0, ret, i;
+	int ret;
+	int i;
+	int j;
+	int current_folio;
+	int desc_pages_needed;
+	int desc_folio_count;
+	int remaining_pages;
+	int need_avail_min;
+	int pages_assigned_to_this_desc;
+	int allocated_descs = 0;
+	size_t current_offset;
+	size_t adjust_offset;
+	struct folio *folio;
	/* map the pages */
	ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
-		bufmap->page_count, FOLL_WRITE, bufmap->page_array);
+			bufmap->page_count,
+			FOLL_WRITE,
+			bufmap->page_array);
	if (ret < 0)
		return ret;
	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
			bufmap->page_count, ret);
-
		for (i = 0; i < ret; i++)
			unpin_user_page(bufmap->page_array[i]);
		return -ENOMEM;
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);
-	/* build a list of available descriptors */
-	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
-		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
-		bufmap->desc_array[i].array_count = pages_per_desc;
+	/*
+	 * Group pages into folios.
+	 */
+	ret = orangefs_bufmap_group_folios(bufmap);
+	if (ret)
+		goto unpin;
+
+	/* NOTE(review): pr_info fires on every bufmap init; gossip_debug may fit better. */
+	pr_info("%s: desc_size=%d bytes (%d pages per desc), total folios=%d\n",
+		__func__, bufmap->desc_size, pages_per_desc,
+		bufmap->folio_count);
+
+	/*
+	 * Carve the folio stream into descriptors: each desc consumes
+	 * pages_per_desc pages, possibly spanning several folios and
+	 * possibly starting mid-folio (tracked via folio_offsets).
+	 */
+	current_folio = 0;
+	remaining_pages = 0;
+	current_offset = 0;
+	for (i = 0; i < bufmap->desc_count; i++) {
+		desc_pages_needed = pages_per_desc;
+		desc_folio_count = 0;
+		pages_assigned_to_this_desc = 0;
+		bufmap->desc_array[i].is_two_2mib_chunks = false;
+
+		/*
+		 * We hope there was enough memory that each desc is
+		 * covered by two THPs/folios, if not we want to keep on
+		 * working even if there's only one page per folio.
+		 */
+		bufmap->desc_array[i].folio_array =
+			kzalloc_objs(struct folio *, pages_per_desc);
+		if (!bufmap->desc_array[i].folio_array) {
+			ret = -ENOMEM;
+			goto unpin;
+		}
+
+		bufmap->desc_array[i].folio_offsets =
+			kzalloc_objs(size_t, pages_per_desc);
+		if (!bufmap->desc_array[i].folio_offsets) {
+			ret = -ENOMEM;
+			kfree(bufmap->desc_array[i].folio_array);
+			bufmap->desc_array[i].folio_array = NULL;
+			goto unpin;
+		}
+
		bufmap->desc_array[i].uaddr =
-		    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
-		offset += pages_per_desc;
+			user_desc->ptr + (size_t)i * bufmap->desc_size;
+
+		/*
+		 * Accumulate folios until desc is full.
+		 */
+		while (desc_pages_needed > 0) {
+			if (remaining_pages == 0) {
+				/* shouldn't happen. */
+				if (current_folio >= bufmap->folio_count) {
+					ret = -EINVAL;
+					goto unpin;
+				}
+				folio = bufmap->folio_array[current_folio++];
+				remaining_pages = folio_nr_pages(folio);
+				current_offset = 0;
+			} else {
+				folio = bufmap->folio_array[current_folio - 1];
+			}
+
+			need_avail_min =
+				min(desc_pages_needed, remaining_pages);
+			adjust_offset = need_avail_min * PAGE_SIZE;
+
+			bufmap->desc_array[i].folio_array[desc_folio_count] =
+				folio;
+			bufmap->desc_array[i].folio_offsets[desc_folio_count] =
+				current_offset;
+			desc_folio_count++;
+			pages_assigned_to_this_desc += need_avail_min;
+			desc_pages_needed -= need_avail_min;
+			remaining_pages -= need_avail_min;
+			current_offset += adjust_offset;
+		}
+
+		/* Detect optimal case: two 2MiB folios per 4MiB slot. */
+		/*
+		 * NOTE(review): this only proves the desc is built from
+		 * two 512-page folios; it does not check that both
+		 * folio_offsets are 0 or that desc_size is 4 MiB.  If
+		 * desc_size were ever smaller, a folio could straddle
+		 * descriptors and the 2 MiB fast paths in the copy
+		 * routines could touch pages outside this desc --
+		 * confirm desc_size is fixed at 4 MiB, or also check
+		 * the offsets here.
+		 */
+		if (desc_folio_count == 2 &&
+		    folio_nr_pages(bufmap->desc_array[i].folio_array[0]) == 512 &&
+		    folio_nr_pages(bufmap->desc_array[i].folio_array[1]) == 512) {
+			bufmap->desc_array[i].is_two_2mib_chunks = true;
+			gossip_debug(GOSSIP_BUFMAP_DEBUG, "%s: descriptor :%d: "
+				"optimal folio/page ratio.\n", __func__, i);
+		}
+
+		bufmap->desc_array[i].folio_count = desc_folio_count;
+		gossip_debug(GOSSIP_BUFMAP_DEBUG,
+			"  descriptor %d: folio_count=%d, "
+			"pages_assigned=%d (should be %d)\n",
+			i, desc_folio_count, pages_assigned_to_this_desc,
+			pages_per_desc);
+
+		allocated_descs = i + 1;
	}
	return 0;
+unpin:
+	/*
+	 * rollback any allocations we got so far...
+	 * Memory pressure, like in generic/340, led me
+	 * to write the rollback this way.
+	 * Pointers are NULLed so a later orangefs_bufmap_free()
+	 * (which also kfrees them) cannot double free.
+	 */
+	/*
+	 * NOTE(review): on a mid-loop failure for desc i, that desc's
+	 * arrays are not covered by allocated_descs; presumably the
+	 * caller's orangefs_bufmap_free() reclaims them -- confirm.
+	 */
+	for (j = 0; j < allocated_descs; j++) {
+		if (bufmap->desc_array[j].folio_array) {
+			kfree(bufmap->desc_array[j].folio_array);
+			bufmap->desc_array[j].folio_array = NULL;
+		}
+		if (bufmap->desc_array[j].folio_offsets) {
+			kfree(bufmap->desc_array[j].folio_offsets);
+			bufmap->desc_array[j].folio_offsets = NULL;
+		}
+	}
+	unpin_user_pages(bufmap->page_array, bufmap->page_count);
+	return ret;
}
/*
*
* initializes the mapped buffer interface
*
+ * user_desc is the parameters provided by userspace for the bufmap.
+ *
* returns 0 on success, -errno on failure
*/
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
int ret = -EINVAL;
gossip_debug(GOSSIP_BUFMAP_DEBUG,
- "orangefs_bufmap_initialize: called (ptr ("
- "%p) sz (%d) cnt(%d).\n",
+ "%s: called (ptr (" "%p) sz (%d) cnt(%d).\n",
+ __func__,
user_desc->ptr,
user_desc->size,
user_desc->count);
spin_unlock(&orangefs_bufmap_lock);
gossip_debug(GOSSIP_BUFMAP_DEBUG,
- "orangefs_bufmap_initialize: exiting normally\n");
+ "%s: exiting normally\n", __func__);
return 0;
out_unmap_bufmap:
	size_t size)
{
	struct orangefs_bufmap_desc *to;
-	int i;
+	size_t remaining = size;
+	int folio_index = 0;
+	struct folio *folio;
+	size_t folio_offset;
+	size_t folio_avail;
+	size_t copy_amount;
+	size_t copied;
+	void *kaddr;
+	size_t half;
+	size_t first;
+	size_t second;
+
+	to = &__orangefs_bufmap->desc_array[buffer_index];
+
+	/*
+	 * shouldn't happen... 4194304 is 4 MiB, presumably the
+	 * orangefs desc_size -- NOTE(review): consider using
+	 * __orangefs_bufmap->desc_size instead of the magic number.
+	 */
+	if (size > 4194304)
+		pr_info("%s: size:%zu\n", __func__, size);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
-		     "%s: buffer_index:%d: size:%zu:\n",
-		     __func__, buffer_index, size);
+		"%s: buffer_index:%d size:%zu folio_count:%d\n",
+		__func__,
+		buffer_index,
+		size,
+		to->folio_count);
+
+	/* Fast path: exactly two 2 MiB folios */
+	/*
+	 * NOTE(review): this path maps both folios at offset 0,
+	 * i.e. it assumes is_two_2mib_chunks implies
+	 * folio_offsets[0] == folio_offsets[1] == 0 -- confirm the
+	 * detection in the map path guarantees that.
+	 */
+	if (to->is_two_2mib_chunks && size <= 4194304) {
+		gossip_debug(GOSSIP_BUFMAP_DEBUG,
+			"%s: fastpath hit.\n", __func__);
+		half = 2097152; /* 2 MiB */
+		first = min(size, half);
+		second = (size > half) ? size - half : 0;
+
+		/* First 2 MiB chunk */
+		kaddr = kmap_local_folio(to->folio_array[0], 0);
+		copied = copy_from_iter(kaddr, first, iter);
+		kunmap_local(kaddr);
+		if (copied != first)
+			return -EFAULT;
-	to = &__orangefs_bufmap->desc_array[buffer_index];
-	for (i = 0; size; i++) {
-		struct page *page = to->page_array[i];
-		size_t n = size;
-		if (n > PAGE_SIZE)
-			n = PAGE_SIZE;
-		if (copy_page_from_iter(page, 0, n, iter) != n)
+		if (second == 0)
+			return 0;
+
+		/* Second 2 MiB chunk */
+		kaddr = kmap_local_folio(to->folio_array[1], 0);
+		copied = copy_from_iter(kaddr, second, iter);
+		kunmap_local(kaddr);
+		if (copied != second)
			return -EFAULT;
-		size -= n;
+
+		return 0;
	}
+
+	/* Slow path: walk the desc's folios, honoring per-folio offsets. */
+	while (remaining > 0) {
+
+		if (unlikely(folio_index >= to->folio_count ||
+				to->folio_array[folio_index] == NULL)) {
+			gossip_err("%s: "
+				"folio_index:%d: >= folio_count:%d: "
+				"(size %zu, buffer %d)\n",
+				__func__,
+				folio_index,
+				to->folio_count,
+				size,
+				buffer_index);
+			return -EFAULT;
+		}
+
+		folio = to->folio_array[folio_index];
+		folio_offset = to->folio_offsets[folio_index];
+		folio_avail = folio_nr_pages(folio) * PAGE_SIZE - folio_offset;
+		copy_amount = min(remaining, folio_avail);
+		kaddr = kmap_local_folio(folio, folio_offset);
+		copied = copy_from_iter(kaddr, copy_amount, iter);
+		kunmap_local(kaddr);
+
+		if (copied != copy_amount)
+			return -EFAULT;
+
+		remaining -= copied;
+		folio_index++;
+	}
+
	return 0;
}
size_t size)
{
struct orangefs_bufmap_desc *from;
- int i;
+ size_t remaining = size;
+ int folio_index = 0;
+ struct folio *folio;
+ size_t folio_offset;
+ size_t folio_avail;
+ size_t copy_amount;
+ size_t copied;
+ void *kaddr;
+ size_t half;
+ size_t first;
+ size_t second;
from = &__orangefs_bufmap->desc_array[buffer_index];
+
+ /* shouldn't happen... */
+ if (size > 4194304)
+ pr_info("%s: size:%zu\n", __func__, size);
+
gossip_debug(GOSSIP_BUFMAP_DEBUG,
- "%s: buffer_index:%d: size:%zu:\n",
- __func__, buffer_index, size);
+ "%s: buffer_index:%d size:%zu folio_count:%d\n",
+ __func__,
+ buffer_index,
+ size,
+ from->folio_count);
+
+ /* Fast path: exactly two 2 MiB folios */
+ if (from->is_two_2mib_chunks && size <= 4194304) {
+ gossip_debug(GOSSIP_BUFMAP_DEBUG,
+ "%s: fastpath hit.\n", __func__);
+ half = 2097152; /* 2 MiB */
+ first = min(size, half);
+ second = (size > half) ? size - half : 0;
+ void *kaddr;
+ size_t copied;
+
+ /* First 2 MiB chunk */
+ kaddr = kmap_local_folio(from->folio_array[0], 0);
+ copied = copy_to_iter(kaddr, first, iter);
+ kunmap_local(kaddr);
+ if (copied != first)
+ return -EFAULT;
+
+ if (second == 0)
+ return 0;
+
+ /* Second 2 MiB chunk */
+ kaddr = kmap_local_folio(from->folio_array[1], 0);
+ copied = copy_to_iter(kaddr, second, iter);
+ kunmap_local(kaddr);
+ if (copied != second)
+ return -EFAULT;
+ return 0;
+ }
+
+ while (remaining > 0) {
+
+ if (unlikely(folio_index >= from->folio_count ||
+ from->folio_array[folio_index] == NULL)) {
+ gossip_err("%s: "
+ "folio_index:%d: >= folio_count:%d: "
+ "(size %zu, buffer %d)\n",
+ __func__,
+ folio_index,
+ from->folio_count,
+ size,
+ buffer_index);
+ return -EFAULT;
+ }
- for (i = 0; size; i++) {
- struct page *page = from->page_array[i];
- size_t n = size;
- if (n > PAGE_SIZE)
- n = PAGE_SIZE;
- n = copy_page_to_iter(page, 0, n, iter);
- if (!n)
+ folio = from->folio_array[folio_index];
+ folio_offset = from->folio_offsets[folio_index];
+ folio_avail = folio_nr_pages(folio) * PAGE_SIZE - folio_offset;
+ copy_amount = min(remaining, folio_avail);
+
+ kaddr = kmap_local_folio(folio, folio_offset);
+ copied = copy_to_iter(kaddr, copy_amount, iter);
+ kunmap_local(kaddr);
+
+ if (copied != copy_amount)
return -EFAULT;
- size -= n;
+
+ remaining -= copied;
+ folio_index++;
}
+
return 0;
}