1 From 911d4ee8536d89ea8a6cd3e96b1c95a3ebc5ea66 Mon Sep 17 00:00:00 2001
2 From: NeilBrown <neilb@suse.de>
3 Date: Tue, 31 Mar 2009 14:39:38 +1100
4 Subject: [PATCH] md/raid5: simplify raid5_compute_sector interface
6 Rather than passing 'pd_idx' and 'qd_idx' to be filled in, pass
7 a 'struct stripe_head *' and fill in the relevant fields. This is
8 more extensible.
10 Signed-off-by: NeilBrown <neilb@suse.de>
12 drivers/md/raid5.c | 118 ++++++++++++++++++++++++++---------------------------
13 1 file changed, 58 insertions(+), 60 deletions(-)
15 --- linux-2.6.27-SLE11_BRANCH.orig/drivers/md/raid5.c
16 +++ linux-2.6.27-SLE11_BRANCH/drivers/md/raid5.c
17 @@ -301,14 +301,13 @@ static int grow_buffers(struct stripe_he
20 static void raid5_build_block (struct stripe_head *sh, int i);
21 -static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
23 +static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
24 + struct stripe_head *sh);
26 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
28 raid5_conf_t *conf = sh->raid_conf;
32 BUG_ON(atomic_read(&sh->count) != 0);
33 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
34 @@ -322,8 +321,7 @@ static void init_stripe(struct stripe_he
36 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
38 - sh->pd_idx = stripe_to_pdidx(sector, conf, previous, &qd_idx);
39 - sh->qd_idx = qd_idx;
40 + stripe_set_idx(sector, conf, previous, sh);
44 @@ -1264,12 +1262,13 @@ static void error(mddev_t *mddev, mdk_rd
45 * Output: index of the data and parity disk, and the sector # in them.
47 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
49 - int *dd_idx, int *pd_idx, int *qd_idx)
50 + int previous, int *dd_idx,
51 + struct stripe_head *sh)
54 unsigned long chunk_number;
55 unsigned int chunk_offset;
58 int sectors_per_chunk = conf->chunk_size >> 9;
59 int raid_disks = previous ? conf->previous_raid_disks
60 @@ -1298,30 +1297,30 @@ static sector_t raid5_compute_sector(rai
62 * Select the parity disk based on the user selected algorithm.
65 + pd_idx = qd_idx = ~0;
68 - *pd_idx = data_disks;
69 + pd_idx = data_disks;
72 switch (conf->algorithm) {
73 case ALGORITHM_LEFT_ASYMMETRIC:
74 - *pd_idx = data_disks - stripe % raid_disks;
75 - if (*dd_idx >= *pd_idx)
76 + pd_idx = data_disks - stripe % raid_disks;
77 + if (*dd_idx >= pd_idx)
80 case ALGORITHM_RIGHT_ASYMMETRIC:
81 - *pd_idx = stripe % raid_disks;
82 - if (*dd_idx >= *pd_idx)
83 + pd_idx = stripe % raid_disks;
84 + if (*dd_idx >= pd_idx)
87 case ALGORITHM_LEFT_SYMMETRIC:
88 - *pd_idx = data_disks - stripe % raid_disks;
89 - *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
90 + pd_idx = data_disks - stripe % raid_disks;
91 + *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
93 case ALGORITHM_RIGHT_SYMMETRIC:
94 - *pd_idx = stripe % raid_disks;
95 - *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
96 + pd_idx = stripe % raid_disks;
97 + *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
100 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
101 @@ -1333,32 +1332,32 @@ static sector_t raid5_compute_sector(rai
103 switch (conf->algorithm) {
104 case ALGORITHM_LEFT_ASYMMETRIC:
105 - *pd_idx = raid_disks - 1 - (stripe % raid_disks);
106 - *qd_idx = *pd_idx + 1;
107 - if (*pd_idx == raid_disks-1) {
108 + pd_idx = raid_disks - 1 - (stripe % raid_disks);
109 + qd_idx = pd_idx + 1;
110 + if (pd_idx == raid_disks-1) {
111 (*dd_idx)++; /* Q D D D P */
113 - } else if (*dd_idx >= *pd_idx)
115 + } else if (*dd_idx >= pd_idx)
116 (*dd_idx) += 2; /* D D P Q D */
118 case ALGORITHM_RIGHT_ASYMMETRIC:
119 - *pd_idx = stripe % raid_disks;
120 - *qd_idx = *pd_idx + 1;
121 - if (*pd_idx == raid_disks-1) {
122 + pd_idx = stripe % raid_disks;
123 + qd_idx = pd_idx + 1;
124 + if (pd_idx == raid_disks-1) {
125 (*dd_idx)++; /* Q D D D P */
127 - } else if (*dd_idx >= *pd_idx)
129 + } else if (*dd_idx >= pd_idx)
130 (*dd_idx) += 2; /* D D P Q D */
132 case ALGORITHM_LEFT_SYMMETRIC:
133 - *pd_idx = raid_disks - 1 - (stripe % raid_disks);
134 - *qd_idx = (*pd_idx + 1) % raid_disks;
135 - *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
136 + pd_idx = raid_disks - 1 - (stripe % raid_disks);
137 + qd_idx = (pd_idx + 1) % raid_disks;
138 + *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
140 case ALGORITHM_RIGHT_SYMMETRIC:
141 - *pd_idx = stripe % raid_disks;
142 - *qd_idx = (*pd_idx + 1) % raid_disks;
143 - *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
144 + pd_idx = stripe % raid_disks;
145 + qd_idx = (pd_idx + 1) % raid_disks;
146 + *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
149 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
150 @@ -1367,6 +1366,10 @@ static sector_t raid5_compute_sector(rai
155 + sh->pd_idx = pd_idx;
156 + sh->qd_idx = qd_idx;
159 * Finally, compute the new sector number
161 @@ -1384,8 +1387,9 @@ static sector_t compute_blocknr(struct s
162 int sectors_per_chunk = conf->chunk_size >> 9;
165 - int chunk_number, dummy1, dummy2, dummy3, dd_idx = i;
166 + int chunk_number, dummy1, dd_idx = i;
168 + struct stripe_head sh2;
171 chunk_offset = sector_div(new_sector, sectors_per_chunk);
172 @@ -1448,8 +1452,9 @@ static sector_t compute_blocknr(struct s
174 check = raid5_compute_sector (conf, r_sector,
175 (raid_disks != conf->raid_disks),
176 - &dummy1, &dummy2, &dummy3);
177 - if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
179 + if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
180 + || sh2.qd_idx != sh->qd_idx) {
181 printk(KERN_ERR "compute_blocknr: map not correct\n");
184 @@ -1845,11 +1850,11 @@ static int page_is_zero(struct page *p)
185 memcmp(a, a+4, STRIPE_SIZE-4)==0);
188 -static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
190 +static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
191 + struct stripe_head *sh)
193 int sectors_per_chunk = conf->chunk_size >> 9;
194 - int pd_idx, dd_idx;
196 int chunk_offset = sector_div(stripe, sectors_per_chunk);
197 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
199 @@ -1857,8 +1862,7 @@ static int stripe_to_pdidx(sector_t stri
200 stripe * (disks - conf->max_degraded)
201 *sectors_per_chunk + chunk_offset,
203 - &dd_idx, &pd_idx, qd_idxp);
209 @@ -2516,13 +2520,12 @@ static void handle_stripe_expansion(raid
210 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
211 for (i = 0; i < sh->disks; i++)
212 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
213 - int dd_idx, pd_idx, qd_idx, j;
215 struct stripe_head *sh2;
217 sector_t bn = compute_blocknr(sh, i);
219 - raid5_compute_sector(conf, bn, 0,
220 - &dd_idx, &pd_idx, &qd_idx);
221 + sector_t s = raid5_compute_sector(conf, bn, 0,
223 sh2 = get_active_stripe(conf, s, 0, 1);
225 /* so far only the early blocks of this stripe
226 @@ -2806,11 +2809,9 @@ static bool handle_stripe5(struct stripe
228 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
229 !sh->reconstruct_state) {
231 /* Need to write out all blocks after computing parity */
232 sh->disks = conf->raid_disks;
233 - sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
234 - sh->qd_idx = qd_idx;
235 + stripe_set_idx(sh->sector, conf, 0, sh);
236 schedule_reconstruction5(sh, &s, 1, 1);
237 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
238 clear_bit(STRIPE_EXPAND_READY, &sh->state);
239 @@ -3027,10 +3028,8 @@ static bool handle_stripe6(struct stripe
241 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
242 /* Need to write out all blocks after computing P&Q */
244 sh->disks = conf->raid_disks;
245 - sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
246 - sh->qd_idx = qd_idx;
247 + stripe_set_idx(sh->sector, conf, 0, sh);
248 compute_parity6(sh, RECONSTRUCT_WRITE);
249 for (i = conf->raid_disks ; i-- ; ) {
250 set_bit(R5_LOCKED, &sh->dev[i].flags);
251 @@ -3302,7 +3301,7 @@ static int chunk_aligned_read(struct req
253 mddev_t *mddev = q->queuedata;
254 raid5_conf_t *conf = mddev_to_conf(mddev);
255 - unsigned int dd_idx, pd_idx, qd_idx;
256 + unsigned int dd_idx;
257 struct bio* align_bi;
260 @@ -3327,7 +3326,7 @@ static int chunk_aligned_read(struct req
262 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
264 - &dd_idx, &pd_idx, &qd_idx);
268 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
269 @@ -3419,7 +3418,7 @@ static int make_request(struct request_q
271 mddev_t *mddev = q->queuedata;
272 raid5_conf_t *conf = mddev_to_conf(mddev);
273 - int dd_idx, pd_idx, qd_idx;
276 sector_t logical_sector, last_sector;
277 struct stripe_head *sh;
278 @@ -3483,7 +3482,7 @@ static int make_request(struct request_q
280 new_sector = raid5_compute_sector(conf, logical_sector,
282 - &dd_idx, &pd_idx, &qd_idx);
284 pr_debug("raid5: make_request, sector %llu logical %llu\n",
285 (unsigned long long)new_sector,
286 (unsigned long long)logical_sector);
287 @@ -3571,7 +3570,6 @@ static sector_t reshape_request(mddev_t
289 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
290 struct stripe_head *sh;
291 - int pd_idx, qd_idx;
292 sector_t first_sector, last_sector;
293 int raid_disks = conf->previous_raid_disks;
294 int data_disks = raid_disks - conf->max_degraded;
295 @@ -3661,11 +3659,11 @@ static sector_t reshape_request(mddev_t
298 raid5_compute_sector(conf, sector_nr*(new_data_disks),
299 - 1, &dd_idx, &pd_idx, &qd_idx);
302 raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
303 *(new_data_disks) - 1),
304 - 1, &dd_idx, &pd_idx, &qd_idx);
306 if (last_sector >= (mddev->size<<1))
307 last_sector = (mddev->size<<1)-1;
308 while (first_sector <= last_sector) {
309 @@ -3800,7 +3798,7 @@ static int retry_aligned_read(raid5_con
310 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
312 struct stripe_head *sh;
313 - int dd_idx, pd_idx, qd_idx;
315 sector_t sector, logical_sector, last_sector;
318 @@ -3808,7 +3806,7 @@ static int retry_aligned_read(raid5_con
320 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
321 sector = raid5_compute_sector(conf, logical_sector,
322 - 0, &dd_idx, &pd_idx, &qd_idx);
324 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
326 for (; logical_sector < last_sector;