From 911d4ee8536d89ea8a6cd3e96b1c95a3ebc5ea66 Mon Sep 17 00:00:00 2001
From: NeilBrown <neilb@suse.de>
Date: Tue, 31 Mar 2009 14:39:38 +1100
Subject: [PATCH] md/raid5: simplify raid5_compute_sector interface

Rather than passing 'pd_idx' and 'qd_idx' to be filled in, pass
a 'struct stripe_head *' and fill in the relevant fields. This is
more extensible.

Signed-off-by: NeilBrown <neilb@suse.de>
---
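In short, the interface changes from

    static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                         int previous,
                                         int *dd_idx, int *pd_idx, int *qd_idx)

to

    static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                         int previous, int *dd_idx,
                                         struct stripe_head *sh)

(signatures as quoted in the hunks below; indentation approximate). Callers
that need only the remapped sector and the data-disk index pass sh == NULL;
otherwise sh->pd_idx and sh->qd_idx are filled in. The same pattern lets
compute_blocknr() verify its reverse mapping against an on-stack
stripe_head (sh2) instead of separate dummy out-parameters.
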
 drivers/md/raid5.c | 118 ++++++++++++++++++++++++++---------------------------
 1 file changed, 58 insertions(+), 60 deletions(-)

--- linux-2.6.27-SLE11_BRANCH.orig/drivers/md/raid5.c
+++ linux-2.6.27-SLE11_BRANCH/drivers/md/raid5.c
@@ -301,14 +301,13 @@ static int grow_buffers(struct stripe_he
 }

 static void raid5_build_block (struct stripe_head *sh, int i);
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
- int *qd_idx);
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+ struct stripe_head *sh);

 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 {
 raid5_conf_t *conf = sh->raid_conf;
 int i;
- int qd_idx;

 BUG_ON(atomic_read(&sh->count) != 0);
 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -322,8 +321,7 @@ static void init_stripe(struct stripe_he

 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 sh->sector = sector;
- sh->pd_idx = stripe_to_pdidx(sector, conf, previous, &qd_idx);
- sh->qd_idx = qd_idx;
+ stripe_set_idx(sector, conf, previous, sh);
 sh->state = 0;

@@ -1264,12 +1262,13 @@ static void error(mddev_t *mddev, mdk_rd
 * Output: index of the data and parity disk, and the sector # in them.
 */
 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
- int previous,
- int *dd_idx, int *pd_idx, int *qd_idx)
+ int previous, int *dd_idx,
+ struct stripe_head *sh)
 {
 long stripe;
 unsigned long chunk_number;
 unsigned int chunk_offset;
+ int pd_idx, qd_idx;
 sector_t new_sector;
 int sectors_per_chunk = conf->chunk_size >> 9;
 int raid_disks = previous ? conf->previous_raid_disks
@@ -1298,30 +1297,30 @@ static sector_t raid5_compute_sector(rai
 /*
 * Select the parity disk based on the user selected algorithm.
 */
- *qd_idx = ~0;
+ pd_idx = qd_idx = ~0;
 switch(conf->level) {
 case 4:
- *pd_idx = data_disks;
+ pd_idx = data_disks;
 break;
 case 5:
 switch (conf->algorithm) {
 case ALGORITHM_LEFT_ASYMMETRIC:
- *pd_idx = data_disks - stripe % raid_disks;
- if (*dd_idx >= *pd_idx)
+ pd_idx = data_disks - stripe % raid_disks;
+ if (*dd_idx >= pd_idx)
 (*dd_idx)++;
 break;
 case ALGORITHM_RIGHT_ASYMMETRIC:
- *pd_idx = stripe % raid_disks;
- if (*dd_idx >= *pd_idx)
+ pd_idx = stripe % raid_disks;
+ if (*dd_idx >= pd_idx)
 (*dd_idx)++;
 break;
 case ALGORITHM_LEFT_SYMMETRIC:
- *pd_idx = data_disks - stripe % raid_disks;
- *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+ pd_idx = data_disks - stripe % raid_disks;
+ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 break;
 case ALGORITHM_RIGHT_SYMMETRIC:
- *pd_idx = stripe % raid_disks;
- *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+ pd_idx = stripe % raid_disks;
+ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 break;
 default:
 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
@@ -1333,32 +1332,32 @@ static sector_t raid5_compute_sector(rai
 /**** FIX THIS ****/
 switch (conf->algorithm) {
 case ALGORITHM_LEFT_ASYMMETRIC:
- *pd_idx = raid_disks - 1 - (stripe % raid_disks);
- *qd_idx = *pd_idx + 1;
- if (*pd_idx == raid_disks-1) {
+ pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ qd_idx = pd_idx + 1;
+ if (pd_idx == raid_disks-1) {
 (*dd_idx)++; /* Q D D D P */
- *qd_idx = 0;
- } else if (*dd_idx >= *pd_idx)
+ qd_idx = 0;
+ } else if (*dd_idx >= pd_idx)
 (*dd_idx) += 2; /* D D P Q D */
 break;
 case ALGORITHM_RIGHT_ASYMMETRIC:
- *pd_idx = stripe % raid_disks;
- *qd_idx = *pd_idx + 1;
- if (*pd_idx == raid_disks-1) {
+ pd_idx = stripe % raid_disks;
+ qd_idx = pd_idx + 1;
+ if (pd_idx == raid_disks-1) {
 (*dd_idx)++; /* Q D D D P */
- *qd_idx = 0;
- } else if (*dd_idx >= *pd_idx)
+ qd_idx = 0;
+ } else if (*dd_idx >= pd_idx)
 (*dd_idx) += 2; /* D D P Q D */
 break;
 case ALGORITHM_LEFT_SYMMETRIC:
- *pd_idx = raid_disks - 1 - (stripe % raid_disks);
- *qd_idx = (*pd_idx + 1) % raid_disks;
- *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+ pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ qd_idx = (pd_idx + 1) % raid_disks;
+ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 break;
 case ALGORITHM_RIGHT_SYMMETRIC:
- *pd_idx = stripe % raid_disks;
- *qd_idx = (*pd_idx + 1) % raid_disks;
- *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+ pd_idx = stripe % raid_disks;
+ qd_idx = (pd_idx + 1) % raid_disks;
+ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 break;
 default:
 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
@@ -1367,6 +1366,10 @@ static sector_t raid5_compute_sector(rai
 break;
 }

+ if (sh) {
+ sh->pd_idx = pd_idx;
+ sh->qd_idx = qd_idx;
+ }
 /*
 * Finally, compute the new sector number
 */
@@ -1384,8 +1387,9 @@ static sector_t compute_blocknr(struct s
 int sectors_per_chunk = conf->chunk_size >> 9;
 sector_t stripe;
 int chunk_offset;
- int chunk_number, dummy1, dummy2, dummy3, dd_idx = i;
+ int chunk_number, dummy1, dd_idx = i;
 sector_t r_sector;
+ struct stripe_head sh2;


 chunk_offset = sector_div(new_sector, sectors_per_chunk);
@@ -1448,8 +1452,9 @@ static sector_t compute_blocknr(struct s

 check = raid5_compute_sector (conf, r_sector,
 (raid_disks != conf->raid_disks),
- &dummy1, &dummy2, &dummy3);
- if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
+ &dummy1, &sh2);
+ if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
+ || sh2.qd_idx != sh->qd_idx) {
 printk(KERN_ERR "compute_blocknr: map not correct\n");
 return 0;
 }
@@ -1845,11 +1850,11 @@ static int page_is_zero(struct page *p)
 memcmp(a, a+4, STRIPE_SIZE-4)==0);
 }

-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int previous,
- int *qd_idxp)
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+ struct stripe_head *sh)
 {
 int sectors_per_chunk = conf->chunk_size >> 9;
- int pd_idx, dd_idx;
+ int dd_idx;
 int chunk_offset = sector_div(stripe, sectors_per_chunk);
 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

@@ -1857,8 +1862,7 @@ static int stripe_to_pdidx(sector_t stri
 stripe * (disks - conf->max_degraded)
 *sectors_per_chunk + chunk_offset,
 previous,
- &dd_idx, &pd_idx, qd_idxp);
- return pd_idx;
+ &dd_idx, sh);
 }

 static void
@@ -2516,13 +2520,12 @@ static void handle_stripe_expansion(raid
 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 for (i = 0; i < sh->disks; i++)
 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
- int dd_idx, pd_idx, qd_idx, j;
+ int dd_idx, j;
 struct stripe_head *sh2;

 sector_t bn = compute_blocknr(sh, i);
- sector_t s =
- raid5_compute_sector(conf, bn, 0,
- &dd_idx, &pd_idx, &qd_idx);
+ sector_t s = raid5_compute_sector(conf, bn, 0,
+ &dd_idx, NULL);
 sh2 = get_active_stripe(conf, s, 0, 1);
 if (sh2 == NULL)
 /* so far only the early blocks of this stripe
@@ -2806,11 +2809,9 @@ static bool handle_stripe5(struct stripe

 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
 !sh->reconstruct_state) {
- int qd_idx;
 /* Need to write out all blocks after computing parity */
 sh->disks = conf->raid_disks;
- sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
- sh->qd_idx = qd_idx;
+ stripe_set_idx(sh->sector, conf, 0, sh);
 schedule_reconstruction5(sh, &s, 1, 1);
 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
 clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -3027,10 +3028,8 @@ static bool handle_stripe6(struct stripe

 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
 /* Need to write out all blocks after computing P&Q */
- int qd_idx;
 sh->disks = conf->raid_disks;
- sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 0, &qd_idx);
- sh->qd_idx = qd_idx;
+ stripe_set_idx(sh->sector, conf, 0, sh);
 compute_parity6(sh, RECONSTRUCT_WRITE);
 for (i = conf->raid_disks ; i-- ; ) {
 set_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -3302,7 +3301,7 @@ static int chunk_aligned_read(struct req
 {
 mddev_t *mddev = q->queuedata;
 raid5_conf_t *conf = mddev_to_conf(mddev);
- unsigned int dd_idx, pd_idx, qd_idx;
+ unsigned int dd_idx;
 struct bio* align_bi;
 mdk_rdev_t *rdev;

@@ -3327,7 +3326,7 @@ static int chunk_aligned_read(struct req
 */
 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
 0,
- &dd_idx, &pd_idx, &qd_idx);
+ &dd_idx, NULL);

 rcu_read_lock();
 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
@@ -3419,7 +3418,7 @@ static int make_request(struct request_q
 {
 mddev_t *mddev = q->queuedata;
 raid5_conf_t *conf = mddev_to_conf(mddev);
- int dd_idx, pd_idx, qd_idx;
+ int dd_idx;
 sector_t new_sector;
 sector_t logical_sector, last_sector;
 struct stripe_head *sh;
@@ -3483,7 +3482,7 @@ static int make_request(struct request_q

 new_sector = raid5_compute_sector(conf, logical_sector,
 previous,
- &dd_idx, &pd_idx, &qd_idx);
+ &dd_idx, NULL);
 pr_debug("raid5: make_request, sector %llu logical %llu\n",
 (unsigned long long)new_sector,
 (unsigned long long)logical_sector);
@@ -3572,7 +3571,6 @@ static sector_t reshape_request(mddev_t
 */
 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 struct stripe_head *sh;
- int pd_idx, qd_idx;
 sector_t first_sector, last_sector;
 int raid_disks = conf->previous_raid_disks;
 int data_disks = raid_disks - conf->max_degraded;
@@ -3662,11 +3660,11 @@ static sector_t reshape_request(mddev_t
 */
 first_sector =
 raid5_compute_sector(conf, sector_nr*(new_data_disks),
- 1, &dd_idx, &pd_idx, &qd_idx);
+ 1, &dd_idx, NULL);
 last_sector =
 raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
 *(new_data_disks) - 1),
- 1, &dd_idx, &pd_idx, &qd_idx);
+ 1, &dd_idx, NULL);
 if (last_sector >= (mddev->size<<1))
 last_sector = (mddev->size<<1)-1;
 while (first_sector <= last_sector) {
@@ -3801,7 +3799,7 @@ static int retry_aligned_read(raid5_con
 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
 */
 struct stripe_head *sh;
- int dd_idx, pd_idx, qd_idx;
+ int dd_idx;
 sector_t sector, logical_sector, last_sector;
 int scnt = 0;
 int remaining;
@@ -3809,7 +3807,7 @@ static int retry_aligned_read(raid5_con

 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 sector = raid5_compute_sector(conf, logical_sector,
- 0, &dd_idx, &pd_idx, &qd_idx);
+ 0, &dd_idx, NULL);
 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

 for (; logical_sector < last_sector;