/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#ifndef BTRFS_RAID56_H
#define BTRFS_RAID56_H

#include <linux/workqueue.h>
#include "volumes.h"

/*
 * The high-level operation a btrfs_raid_bio is carrying out.  Stored in
 * btrfs_raid_bio::operation; READ_REBUILD is handled differently from a
 * rebuild done as part of RMW (see the comment on that field below).
 */
enum btrfs_rbio_ops {
	/* Full-stripe or read-modify-write write of data + parity */
	BTRFS_RBIO_WRITE,
	/* Rebuild data for a read from higher up (degraded read path) */
	BTRFS_RBIO_READ_REBUILD,
	/* Verify/repair parity during scrub */
	BTRFS_RBIO_PARITY_SCRUB,
	/*
	 * Rebuild stripes of a missing device — presumably used by
	 * scrub/replace when a device is absent; confirm against callers.
	 */
	BTRFS_RBIO_REBUILD_MISSING,
};
19
/*
 * State for one in-flight RAID5/6 full-stripe operation: the bios being
 * merged into it, the pages backing the full stripe (including P/Q), and
 * bookkeeping for recovery and scrub.
 */
struct btrfs_raid_bio {
	struct btrfs_io_context *bioc;

	/*
	 * While we're doing RMW on a stripe we put it into a hash table so we
	 * can lock the stripe and merge more rbios into it.
	 */
	struct list_head hash_list;

	/* LRU list for the stripe cache */
	struct list_head stripe_cache;

	/* For scheduling work in the helper threads */
	struct work_struct work;

	/*
	 * bio_list and bio_list_lock are used to add more bios into the stripe
	 * in hopes of avoiding the full RMW
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * Also protected by the bio_list_lock, the plug list is used by the
	 * plugging code to collect partial bios while plugged.  The stripe
	 * locking code also uses it to hand off the stripe lock to the next
	 * pending IO.
	 */
	struct list_head plug_list;

	/* Flags that tell us if it is safe to merge with this bio. */
	unsigned long flags;

	/*
	 * Set if we're doing a parity rebuild for a read from higher up, which
	 * is handled differently from a parity rebuild as part of RMW.
	 */
	enum btrfs_rbio_ops operation;

	/* How many pages there are for the full stripe including P/Q */
	u16 nr_pages;

	/* How many sectors there are for the full stripe including P/Q */
	u16 nr_sectors;

	/* Number of data stripes (no p/q) */
	u8 nr_data;

	/* Number of all stripes (including P/Q) */
	u8 real_stripes;

	/* How many pages there are for each stripe */
	u8 stripe_npages;

	/* How many sectors there are for each stripe */
	u8 stripe_nsectors;

	/* First bad stripe, -1 means no corruption */
	s8 faila;

	/* Second bad stripe (for RAID6 use) */
	s8 failb;

	/* Stripe number that we're scrubbing */
	u8 scrubp;

	/*
	 * Size of all the bios in the bio_list.  This helps us decide if the
	 * rbio maps to a full stripe or not.
	 */
	int bio_list_bytes;

	/*
	 * NOTE(review): undocumented here — presumably counts bios whose
	 * completion is deferred to this rbio; confirm against raid56.c.
	 */
	int generic_bio_cnt;

	/* Reference count on this rbio */
	refcount_t refs;

	/* Per-stripe IOs still in flight — presumably; see submission paths */
	atomic_t stripes_pending;

	/* Accumulated IO error count — presumably; see end_io handlers */
	atomic_t error;

	/* Work item for bottom-half completion processing */
	struct work_struct end_io_work;

	/* Bitmap to record which horizontal stripe has data */
	unsigned long dbitmap;

	/* Allocated with stripe_nsectors-many bits for finish_*() calls */
	unsigned long finish_pbitmap;

	/*
	 * These are two arrays of pointers.  We allocate the rbio big enough
	 * to hold them both and setup their locations when the rbio is
	 * allocated.
	 */

	/*
	 * Pointers to pages that we allocated for reading/writing stripes
	 * directly from the disk (including P/Q).
	 */
	struct page **stripe_pages;

	/* Pointers to the sectors in the bio_list, for faster lookup */
	struct sector_ptr *bio_sectors;

	/*
	 * For subpage support, we need to map each sector to above
	 * stripe_pages.
	 */
	struct sector_ptr *stripe_sectors;

	/* Allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;
};
132
/*
 * For trace event usage only.  Records useful debug info for each bio
 * submitted by RAID56 to each physical device.
 *
 * No matter signed or not, (-1) is always the one indicating we can not grab
 * the proper stripe number.
 */
struct raid56_bio_trace_info {
	u64 devid;

	/* The offset inside the stripe. (<= STRIPE_LEN) */
	u32 offset;

	/*
	 * Stripe number.
	 * 0 is the first data stripe, and nr_data for P stripe,
	 * nr_data + 1 for Q stripe.
	 * NOTE(review): values >= real_stripes presumably mean the stripe
	 * number could not be determined (cf. the (-1) note above) — confirm
	 * against the trace-event fill sites.
	 */
	u8 stripe_nr;
};
154
72ad8131 155static inline int nr_data_stripes(const struct map_lookup *map)
53b381b3 156{
0b30f719 157 return map->num_stripes - btrfs_nr_parity_stripes(map->type);
53b381b3 158}

/*
 * Sentinel "stripe numbers" used where a physical stripe index is expected
 * but the stripe holds parity rather than data.  They sit at the very top
 * of the u64 range so they can never collide with a real stripe number.
 */
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

/* True if @x is one of the parity sentinels above (P or Q stripe). */
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
			     ((x) == RAID6_Q_STRIPE))

struct btrfs_device;

/*
 * Read path: rebuild the data of a full stripe from the surviving stripes
 * and parity.  Write path: perform a RAID5/6 (sub-)stripe write, computing
 * parity as needed.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num, bool generic_io);
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);

/* Attach one page of scrub data at @pgoff for logical address @logical. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    unsigned int pgoff, u64 logical);

/*
 * Scrub support: allocate an rbio that verifies/repairs parity of the
 * sectors set in @dbitmap on @scrub_dev, then submit it.
 */
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);

/*
 * Rebuild stripes that live on a missing device — presumably used by the
 * device-replace/scrub machinery; confirm against callers in raid56.c.
 */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);

/* Lifetime of the per-fs stripe hash table used for rbio merging/locking. */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);

#endif