dump_bioc(fs_info, rbio->bioc);
btrfs_crit(fs_info,
-"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u scrubp=%u dbitmap=0x%lx",
+"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u sector_nsteps=%u scrubp=%u dbitmap=0x%lx",
rbio->flags, rbio->nr_sectors, rbio->nr_data,
rbio->real_stripes, rbio->stripe_nsectors,
- rbio->scrubp, rbio->dbitmap);
+ rbio->sector_nsteps, rbio->scrubp, rbio->dbitmap);
}
#define ASSERT_RBIO(expr, rbio) \
static void memcpy_from_bio_to_stripe(struct btrfs_raid_bio *rbio, unsigned int sector_nr)
{
- phys_addr_t dst = rbio->stripe_paddrs[sector_nr];
- phys_addr_t src = rbio->bio_paddrs[sector_nr];
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
- ASSERT(dst != INVALID_PADDR);
- ASSERT(src != INVALID_PADDR);
+ ASSERT(sector_nr < rbio->nr_sectors);
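+ /*
+  * With sectorsize > PAGE_SIZE a sector spans several pages, copy it
+  * one step (at most one page) at a time.
+  */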
+ for (int i = 0; i < rbio->sector_nsteps; i++) {
+ unsigned int index = sector_nr * rbio->sector_nsteps + i;
+ phys_addr_t dst = rbio->stripe_paddrs[index];
+ phys_addr_t src = rbio->bio_paddrs[index];
- memcpy_page(phys_to_page(dst), offset_in_page(dst),
- phys_to_page(src), offset_in_page(src),
- rbio->bioc->fs_info->sectorsize);
+ ASSERT(dst != INVALID_PADDR);
+ ASSERT(src != INVALID_PADDR);
+
+ memcpy_page(phys_to_page(dst), offset_in_page(dst),
+ phys_to_page(src), offset_in_page(src), step);
+ }
}
/*
for (i = 0; i < rbio->nr_sectors; i++) {
/* Some range not covered by bio (partial write), skip it */
- if (rbio->bio_paddrs[i] == INVALID_PADDR) {
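+ /*
+  * Bios cover whole sectors, so checking the first step of each
+  * sector is enough.
+  */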
+ if (rbio->bio_paddrs[i * rbio->sector_nsteps] == INVALID_PADDR) {
/*
* Even if the sector is not covered by bio, if it is
* a data sector it should still be uptodate as it is
*/
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
u32 offset;
int i;
- for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
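+ /* One paddr entry per step, thus sector_nsteps entries per sector. */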
+ for (i = 0, offset = 0; i < rbio->nr_sectors * rbio->sector_nsteps;
+ i++, offset += step) {
int page_index = offset >> PAGE_SHIFT;
ASSERT(page_index < rbio->nr_pages);
return 1;
}
-static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
- unsigned int stripe_nr,
- unsigned int sector_nr)
+/* Return the sector index for @stripe_nr and @sector_nr. */
+static unsigned int rbio_sector_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr)
{
+ unsigned int ret;
+
ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);
- return stripe_nr * rbio->stripe_nsectors + sector_nr;
+ ret = stripe_nr * rbio->stripe_nsectors + sector_nr;
+ ASSERT(ret < rbio->nr_sectors);
+ return ret;
+}
+
+/* Return the paddr array index for @stripe_nr, @sector_nr and @step_nr. */
+static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr,
+ unsigned int step_nr)
+{
+ unsigned int ret;
+
+ ASSERT_RBIO_SECTOR(step_nr < rbio->sector_nsteps, rbio, step_nr);
+
+ ret = rbio_sector_index(rbio, stripe_nr, sector_nr) * rbio->sector_nsteps + step_nr;
+ ASSERT(ret < rbio->nr_sectors * rbio->sector_nsteps);
+ return ret;
}
/* Return a paddr from rbio->stripe_sectors, not from the bio list */
static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio,
unsigned int stripe_nr, unsigned int sector_nr)
{
- return rbio->stripe_paddrs[rbio_stripe_sector_index(rbio, stripe_nr, sector_nr)];
+ return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)];
}
/* Grab a paddr inside P stripe */
const unsigned int stripe_nsectors =
BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
const unsigned int num_sectors = stripe_nsectors * real_stripes;
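+ /*
+  * Each sector is tracked in steps of at most one page, so it needs
+  * sector_nsteps paddr entries (1 unless sectorsize > PAGE_SIZE).
+  */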
+ const unsigned int step = min(fs_info->sectorsize, PAGE_SIZE);
+ const unsigned int sector_nsteps = fs_info->sectorsize / step;
struct btrfs_raid_bio *rbio;
/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
return ERR_PTR(-ENOMEM);
rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
GFP_NOFS);
- rbio->bio_paddrs = kcalloc(num_sectors, sizeof(phys_addr_t), GFP_NOFS);
- rbio->stripe_paddrs = kcalloc(num_sectors, sizeof(phys_addr_t), GFP_NOFS);
+ rbio->bio_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
+ rbio->stripe_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
rbio->stripe_uptodate_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
kfree(rbio);
return ERR_PTR(-ENOMEM);
}
- for (int i = 0; i < num_sectors; i++) {
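+ /* Nothing is mapped yet, mark every per-step entry as invalid. */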
+ for (int i = 0; i < num_sectors * sector_nsteps; i++) {
rbio->stripe_paddrs[i] = INVALID_PADDR;
rbio->bio_paddrs[i] = INVALID_PADDR;
}
rbio->real_stripes = real_stripes;
rbio->stripe_npages = stripe_npages;
rbio->stripe_nsectors = stripe_nsectors;
+ rbio->sector_nsteps = sector_nsteps;
refcount_set(&rbio->refs, 1);
atomic_set(&rbio->stripes_pending, 0);
static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- const u32 sectorsize_bits = rbio->bioc->fs_info->sectorsize_bits;
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 step_bits = min(fs_info->sectorsize_bits, PAGE_SHIFT);
struct bvec_iter iter = bio->bi_iter;
phys_addr_t paddr;
u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
rbio->bioc->full_stripe_logical;
- btrfs_bio_for_each_block(paddr, bio, &iter, sectorsize) {
- unsigned int index = (offset >> sectorsize_bits);
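+ /* Record the paddr of each step-sized chunk covered by the bio. */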
+ btrfs_bio_for_each_block(paddr, bio, &iter, step) {
+ unsigned int index = (offset >> step_bits);
rbio->bio_paddrs[index] = paddr;
- offset += sectorsize;
+ offset += step;
}
}
sector_paddr_in_rbio(rbio, stripe, sectornr, 0));
/* Then add the parity stripe */
- set_bit(rbio_stripe_sector_index(rbio, rbio->nr_data, sectornr),
+ set_bit(rbio_sector_index(rbio, rbio->nr_data, sectornr),
rbio->stripe_uptodate_bitmap);
pointers[stripe++] = kmap_local_paddr(rbio_pstripe_paddr(rbio, sectornr));
* RAID6, add the qstripe and call the library function
* to fill in our p/q
*/
- set_bit(rbio_stripe_sector_index(rbio, rbio->nr_data + 1, sectornr),
+ set_bit(rbio_sector_index(rbio, rbio->nr_data + 1, sectornr),
rbio->stripe_uptodate_bitmap);
pointers[stripe++] = kmap_local_paddr(rbio_qstripe_paddr(rbio, sectornr));
if (ret < 0)
goto cleanup;
- set_bit(rbio_stripe_sector_index(rbio, faila, sector_nr),
+ set_bit(rbio_sector_index(rbio, faila, sector_nr),
rbio->stripe_uptodate_bitmap);
}
if (failb >= 0) {
if (ret < 0)
goto cleanup;
- set_bit(rbio_stripe_sector_index(rbio, failb, sector_nr),
+ set_bit(rbio_sector_index(rbio, failb, sector_nr),
rbio->stripe_uptodate_bitmap);
}
int i;
for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
- phys_addr_t paddr = rbio->stripe_paddrs[i];
+ phys_addr_t paddr = rbio->stripe_paddrs[i * rbio->sector_nsteps];
/*
* We have a sector which doesn't have page nor uptodate,
* The bio cache may have handed us an uptodate sector. If so,
* use it.
*/
- if (test_bit(rbio_stripe_sector_index(rbio, stripe, sectornr),
+ if (test_bit(rbio_sector_index(rbio, stripe, sectornr),
rbio->stripe_uptodate_bitmap))
continue;