From d0dabf7e577411c2bf6b616c751544dc241213d4 Mon Sep 17 00:00:00 2001
From: NeilBrown <neilb@suse.de>
Date: Tue, 31 Mar 2009 14:39:38 +1100
Subject: [PATCH] md/raid6: remove expectation that Q device is immediately after P device.

Code currently assumes that the devices in a raid6 stripe are
  0 1 ... N-1 P Q
in some rotated order. We will shortly add new layouts in which
this strict pattern is broken.
So remove this expectation. We still assume that the data disks
are roughly in-order. However, P and Q can be inserted anywhere within
that order.
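
For example, code that previously found Q by stepping one device past
P:

	r6s.qd_idx = raid6_next_disk(pd_idx, disks);

now reads the index that raid5_compute_sector() stored in the
stripe_head:

	r6s.qd_idx = sh->qd_idx;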

Signed-off-by: NeilBrown <neilb@suse.de>
---
 drivers/md/raid5.c         |  213 ++++++++++++++++++++++++++-------------------
 include/linux/raid/raid5.h |   15 +--
 2 files changed, 133 insertions(+), 95 deletions(-)

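To illustrate the new mapping, take a hypothetical 6-device stripe with
pd_idx == 2 and qd_idx == 3 (values chosen purely for illustration).
The raid6_idx_to_slot() helper added below walks the devices starting
from raid6_d0() == qd_idx + 1 == 4 and assigns syndrome slots as:

	device index:   4    5    0    1    2    3
	role:           D0   D1   D2   D3   P    Q
	syndrome slot:  0    1    2    3    4    5

The data devices take slots 0 .. disks-3 in walk order, while P and Q
always land in slots disks-2 and disks-1 no matter where they sit
physically, so gen_syndrome() continues to see its inputs in the order
it expects.
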
--- linux-2.6.27-SLE11_BRANCH.orig/drivers/md/raid5.c
+++ linux-2.6.27-SLE11_BRANCH/drivers/md/raid5.c
@@ -135,12 +135,36 @@ static inline void raid5_set_bi_hw_segme
 	bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
 }
 
+/* Find first data disk in a raid6 stripe */
+static inline int raid6_d0(struct stripe_head *sh)
+{
+	if (sh->qd_idx == sh->disks - 1)
+		return 0;
+	else
+		return sh->qd_idx + 1;
+}
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
 	disk++;
 	return (disk < raid_disks) ? disk : 0;
 }
 
+/* When walking through the disks in a raid5, starting at raid6_d0,
+ * we need to map each disk to a 'slot', where the data disks are slot
+ * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
+ * is raid_disks-1. This helper does that mapping.
+ */
+static int raid6_idx_to_slot(int idx, struct stripe_head *sh, int *count)
+{
+	int slot;
+	if (idx == sh->pd_idx)
+		return sh->disks - 2;
+	if (idx == sh->qd_idx)
+		return sh->disks - 1;
+	slot = (*count)++;
+	return slot;
+}
+
 static void return_io(struct bio *return_bi)
 {
 	struct bio *bi = return_bi;
@@ -198,6 +222,7 @@ static void __release_stripe(raid5_conf_
 		}
 	}
 }
+
 static void release_stripe(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
@@ -276,12 +301,14 @@ static int grow_buffers(struct stripe_he
 }
 
 static void raid5_build_block (struct stripe_head *sh, int i);
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous);
+static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
+			   int *qd_idx);
 
 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
+	int qd_idx;
 
 	BUG_ON(atomic_read(&sh->count) != 0);
 	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -295,7 +322,8 @@ static void init_stripe(struct stripe_he
 
 	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 	sh->sector = sector;
-	sh->pd_idx = stripe_to_pdidx(sector, conf, previous);
+	sh->pd_idx = stripe_to_pdidx(sector, conf, previous, &qd_idx);
+	sh->qd_idx = qd_idx;
 	sh->state = 0;
 
 
@@ -1237,7 +1265,7 @@ static void error(mddev_t *mddev, mdk_rd
  */
 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				     int previous,
-				     int *dd_idx, int *pd_idx)
+				     int *dd_idx, int *pd_idx, int *qd_idx)
 {
 	long stripe;
 	unsigned long chunk_number;
@@ -1270,6 +1298,7 @@ static sector_t raid5_compute_sector(rai
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
+	*qd_idx = ~0;
 	switch(conf->level) {
 	case 4:
 		*pd_idx = data_disks;
@@ -1305,24 +1334,30 @@ static sector_t raid5_compute_sector(rai
 		switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
-			if (*pd_idx == raid_disks-1)
+			*qd_idx = *pd_idx + 1;
+			if (*pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
+				*qd_idx = 0;
+			} else if (*dd_idx >= *pd_idx)
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
 			*pd_idx = stripe % raid_disks;
-			if (*pd_idx == raid_disks-1)
+			*qd_idx = *pd_idx + 1;
+			if (*pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
+				*qd_idx = 0;
+			} else if (*dd_idx >= *pd_idx)
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
 			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
 			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
 			*pd_idx = stripe % raid_disks;
+			*qd_idx = (*pd_idx + 1) % raid_disks;
 			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
 		default:
@@ -1349,7 +1384,7 @@ static sector_t compute_blocknr(struct s
 	int sectors_per_chunk = conf->chunk_size >> 9;
 	sector_t stripe;
 	int chunk_offset;
-	int chunk_number, dummy1, dummy2, dd_idx = i;
+	int chunk_number, dummy1, dummy2, dummy3, dd_idx = i;
 	sector_t r_sector;
 
 
@@ -1380,7 +1415,7 @@ static sector_t compute_blocknr(struct s
 		}
 		break;
 	case 6:
-		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
+		if (i == sh->qd_idx)
 			return 0; /* It is the Q disk */
 		switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
@@ -1413,7 +1448,7 @@ static sector_t compute_blocknr(struct s
 
 	check = raid5_compute_sector (conf, r_sector,
 				      (raid_disks != conf->raid_disks),
-				      &dummy1, &dummy2);
+				      &dummy1, &dummy2, &dummy3);
 	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
 		printk(KERN_ERR "compute_blocknr: map not correct\n");
 		return 0;
@@ -1482,13 +1517,14 @@ static void copy_data(int frombio, struc
 static void compute_parity6(struct stripe_head *sh, int method)
 {
 	raid6_conf_t *conf = sh->raid_conf;
-	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
+	int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
 	struct bio *chosen;
 	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
 	void *ptrs[disks];
 
-	qd_idx = raid6_next_disk(pd_idx, disks);
-	d0_idx = raid6_next_disk(qd_idx, disks);
+	pd_idx = sh->pd_idx;
+	qd_idx = sh->qd_idx;
+	d0_idx = raid6_d0(sh);
 
 	pr_debug("compute_parity, stripe %llu, method %d\n",
 		(unsigned long long)sh->sector, method);
@@ -1526,22 +1562,22 @@ static void compute_parity6(struct strip
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 	}
 
-//	switch(method) {
-//	case RECONSTRUCT_WRITE:
-//	case CHECK_PARITY:
-//	case UPDATE_PARITY:
-		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
-		/* FIX: Is this ordering of drives even remotely optimal? */
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("block %d/%d not uptodate on parity calc\n", i,count);
-			i = raid6_next_disk(i, disks);
-		} while ( i != d0_idx );
-//		break;
-//	}
+	/* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
+	/* FIX: Is this ordering of drives even remotely optimal? */
+	count = 0;
+	i = d0_idx;
+	do {
+		int slot = raid6_idx_to_slot(i, sh, &count);
+		ptrs[slot] = page_address(sh->dev[i].page);
+		if (slot < sh->disks - 2 &&
+		    !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
+			printk(KERN_ERR "block %d/%d not uptodate "
+			       "on parity calc\n", i, count);
+			BUG();
+		}
+		i = raid6_next_disk(i, disks);
+	} while (i != d0_idx);
+	BUG_ON(count+2 != disks);
 
 	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
 
@@ -1565,8 +1601,7 @@ static void compute_block_1(struct strip
 {
 	int i, count, disks = sh->disks;
 	void *ptr[MAX_XOR_BLOCKS], *dest, *p;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
+	int qd_idx = sh->qd_idx;
 
 	pr_debug("compute_block_1, stripe %llu, idx %d\n",
 		(unsigned long long)sh->sector, dd_idx);
@@ -1602,21 +1637,31 @@ static void compute_block_1(struct strip
 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
 {
 	int i, count, disks = sh->disks;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
-	int d0_idx = raid6_next_disk(qd_idx, disks);
-	int faila, failb;
-
-	/* faila and failb are disk numbers relative to d0_idx */
-	/* pd_idx become disks-2 and qd_idx become disks-1 */
-	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
-	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
+	int d0_idx = raid6_d0(sh);
+	int faila = -1, failb = -1;
+	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
+	void *ptrs[disks];
+
+	count = 0;
+	i = d0_idx;
+	do {
+		int slot;
+		slot = raid6_idx_to_slot(i, sh, &count);
+		ptrs[slot] = page_address(sh->dev[i].page);
+		if (i == dd_idx1)
+			faila = slot;
+		if (i == dd_idx2)
+			failb = slot;
+		i = raid6_next_disk(i, disks);
+	} while (i != d0_idx);
+	BUG_ON(count+2 != disks);
 
 	BUG_ON(faila == failb);
 	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
 
 	pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
-		(unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
+		 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
+		 faila, failb);
 
 	if ( failb == disks-1 ) {
 		/* Q disk is one of the missing disks */
@@ -1626,39 +1671,26 @@ static void compute_block_2(struct strip
 			return;
 		} else {
 			/* We're missing D+Q; recompute D from P */
-			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
+			compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
+					     dd_idx2 : dd_idx1),
+					0);
 			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
 			return;
 		}
 	}
 
-	/* We're missing D+P or D+D; build pointer table */
-	{
-		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
-		void *ptrs[disks];
-
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			i = raid6_next_disk(i, disks);
-			if (i != dd_idx1 && i != dd_idx2 &&
-			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("compute_2 with missing block %d/%d\n", count, i);
-		} while ( i != d0_idx );
-
-		if ( failb == disks-2 ) {
-			/* We're missing D+P. */
-			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
-		} else {
-			/* We're missing D+D. */
-			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
-		}
-
-		/* Both the above update both missing blocks */
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
+	/* We're missing D+P or D+D; */
+	if (failb == disks-2) {
+		/* We're missing D+P. */
+		raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
+	} else {
+		/* We're missing D+D. */
+		raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
 	}
+
+	/* Both the above update both missing blocks */
+	set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
+	set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
 }
 
 static void
@@ -1813,7 +1845,8 @@ static int page_is_zero(struct page *p)
 		memcmp(a, a+4, STRIPE_SIZE-4)==0);
 }
 
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous)
+static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
+			   int *qd_idxp)
 {
 	int sectors_per_chunk = conf->chunk_size >> 9;
 	int pd_idx, dd_idx;
@@ -1824,7 +1857,7 @@ static int stripe_to_pdidx(sector_t stri
 			     stripe * (disks - conf->max_degraded)
 			     *sectors_per_chunk + chunk_offset,
 			     previous,
-			     &dd_idx, &pd_idx);
+			     &dd_idx, &pd_idx, qd_idxp);
 	return pd_idx;
 }
 
@@ -2483,12 +2516,13 @@ static void handle_stripe_expansion(raid
 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 	for (i = 0; i < sh->disks; i++)
 		if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
-			int dd_idx, pd_idx, j;
+			int dd_idx, pd_idx, qd_idx, j;
 			struct stripe_head *sh2;
 
 			sector_t bn = compute_blocknr(sh, i);
-			sector_t s = raid5_compute_sector(conf, bn, 0,
-							  &dd_idx, &pd_idx);
+			sector_t s =
+				raid5_compute_sector(conf, bn, 0,
+						     &dd_idx, &pd_idx, &qd_idx);
 			sh2 = get_active_stripe(conf, s, 0, 1);
 			if (sh2 == NULL)
 				/* so far only the early blocks of this stripe
@@ -2512,8 +2546,7 @@ static void handle_stripe_expansion(raid
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
 			for (j = 0; j < conf->raid_disks; j++)
 				if (j != sh2->pd_idx &&
-				    (!r6s || j != raid6_next_disk(sh2->pd_idx,
-								  sh2->disks)) &&
+				    (!r6s || j != sh2->qd_idx) &&
 				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
 					break;
 			if (j == conf->raid_disks) {
@@ -2773,9 +2806,11 @@ static bool handle_stripe5(struct stripe
 
 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
 	    !sh->reconstruct_state) {
+		int qd_idx;
 		/* Need to write out all blocks after computing parity */
 		sh->disks = conf->raid_disks;
-		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0);
+		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
+		sh->qd_idx = qd_idx;
 		schedule_reconstruction5(sh, &s, 1, 1);
 	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -2816,7 +2851,7 @@ static bool handle_stripe6(struct stripe
 	struct r5dev *dev, *pdev, *qdev;
 	mdk_rdev_t *blocked_rdev = NULL;
 
-	r6s.qd_idx = raid6_next_disk(pd_idx, disks);
+	r6s.qd_idx = sh->qd_idx;
 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
 		"pd_idx=%d, qd_idx=%d\n",
 	       (unsigned long long)sh->sector, sh->state,
@@ -2992,8 +3027,10 @@ static bool handle_stripe6(struct stripe
 
 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
 		/* Need to write out all blocks after computing P&Q */
+		int qd_idx;
 		sh->disks = conf->raid_disks;
-		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0);
+		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
+		sh->qd_idx = qd_idx;
 		compute_parity6(sh, RECONSTRUCT_WRITE);
 		for (i = conf->raid_disks ; i-- ; ) {
 			set_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -3265,7 +3302,7 @@ static int chunk_aligned_read(struct req
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
-	unsigned int dd_idx, pd_idx;
+	unsigned int dd_idx, pd_idx, qd_idx;
 	struct bio* align_bi;
 	mdk_rdev_t *rdev;
 
@@ -3290,7 +3327,7 @@ static int chunk_aligned_read(struct req
 	 */
 	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
 						   0,
-						   &dd_idx, &pd_idx);
+						   &dd_idx, &pd_idx, &qd_idx);
 
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
@@ -3382,7 +3419,7 @@ static int make_request(struct request_q
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
-	unsigned int dd_idx, pd_idx;
+	int dd_idx, pd_idx, qd_idx;
 	sector_t new_sector;
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
@@ -3446,7 +3483,7 @@ static int make_request(struct request_q
 
 		new_sector = raid5_compute_sector(conf, logical_sector,
 						  previous,
-						  &dd_idx, &pd_idx);
+						  &dd_idx, &pd_idx, &qd_idx);
 		pr_debug("raid5: make_request, sector %llu logical %llu\n",
 			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
@@ -3535,7 +3572,7 @@ static sector_t reshape_request(mddev_t
 	 */
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 	struct stripe_head *sh;
-	int pd_idx;
+	int pd_idx, qd_idx;
 	sector_t first_sector, last_sector;
 	int raid_disks = conf->previous_raid_disks;
 	int data_disks = raid_disks - conf->max_degraded;
@@ -3598,7 +3635,7 @@ static sector_t reshape_request(mddev_t
 			if (j == sh->pd_idx)
 				continue;
 			if (conf->level == 6 &&
-			    j == raid6_next_disk(sh->pd_idx, sh->disks))
+			    j == sh->qd_idx)
 				continue;
 			s = compute_blocknr(sh, j);
 			if (s < mddev->array_sectors) {
@@ -3625,11 +3662,11 @@ static sector_t reshape_request(mddev_t
 	 */
 	first_sector =
 		raid5_compute_sector(conf, sector_nr*(new_data_disks),
-				     1, &dd_idx, &pd_idx);
+				     1, &dd_idx, &pd_idx, &qd_idx);
 	last_sector =
 		raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
 				     *(new_data_disks) - 1),
-				     1, &dd_idx, &pd_idx);
+				     1, &dd_idx, &pd_idx, &qd_idx);
 	if (last_sector >= (mddev->size<<1))
 		last_sector = (mddev->size<<1)-1;
 	while (first_sector <= last_sector) {
@@ -3764,7 +3801,7 @@ static int retry_aligned_read(raid5_con
 	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
 	 */
 	struct stripe_head *sh;
-	int dd_idx, pd_idx;
+	int dd_idx, pd_idx, qd_idx;
 	sector_t sector, logical_sector, last_sector;
 	int scnt = 0;
 	int remaining;
@@ -3772,7 +3809,7 @@ static int retry_aligned_read(raid5_con
 
 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
-				      0, &dd_idx, &pd_idx);
+				      0, &dd_idx, &pd_idx, &qd_idx);
 	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
 
 	for (; logical_sector < last_sector;
--- linux-2.6.27-SLE11_BRANCH.orig/include/linux/raid/raid5.h
+++ linux-2.6.27-SLE11_BRANCH/include/linux/raid/raid5.h
@@ -197,15 +197,16 @@ enum reconstruct_states {
 
 struct stripe_head {
 	struct hlist_node	hash;
-	struct list_head	lru;			/* inactive_list or handle_list */
-	struct raid5_private_data	*raid_conf;
-	sector_t		sector;			/* sector of this row */
-	int			pd_idx;			/* parity disk index */
-	unsigned long		state;			/* state flags */
-	atomic_t		count;			/* nr of active thread/requests */
+	struct list_head	lru;	      /* inactive_list or handle_list */
+	struct raid5_private_data *raid_conf;
+	sector_t		sector;		/* sector of this row */
+	short			pd_idx;		/* parity disk index */
+	short			qd_idx;		/* 'Q' disk index for raid6 */
+	unsigned long		state;		/* state flags */
+	atomic_t		count;	      /* nr of active thread/requests */
 	spinlock_t		lock;
 	int			bm_seq;	/* sequence number for bitmap flushes */
-	int			disks;			/* disks in stripe */
+	int			disks;		/* disks in stripe */
 	enum check_states	check_state;
 	enum reconstruct_states	reconstruct_state;
 	/* stripe_operations