]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
b7c614298bbbcd4d5a9798253009b7b9204db22b
[thirdparty/mdadm.git] / super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF takes from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
60
/* Distinct wrapper types for big-endian on-disk fields.  Wrapping the
 * raw integer in a one-member struct makes accidental mixing of
 * CPU-order and disk-order values a compile error rather than a silent
 * bug; all conversions must go through the helpers below.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Equality and bit operations are endian-agnostic, so they can act on
 * the raw stored value directly. */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)

/* Unwrap a big-endian field to a CPU-order value. */
#define be16_to_cpu(be) __be16_to_cpu((be)._v16)
/* Wrap a CPU-order value into a big-endian field. */
static inline be16 cpu_to_be16(__u16 x)
{
	be16 be = { ._v16 = __cpu_to_be16(x) };
	return be;
}

#define be32_to_cpu(be) __be32_to_cpu((be)._v32)
static inline be32 cpu_to_be32(__u32 x)
{
	be32 be = { ._v32 = __cpu_to_be32(x) };
	return be;
}

#define be64_to_cpu(be) __be64_to_cpu((be)._v64)
static inline be64 cpu_to_be64(__u64 x)
{
	be64 be = { ._v64 = __cpu_to_be64(x) };
	return be;
}
100
/* Primary Raid Level (PRL) */
#define DDF_RAID0	0x00
#define DDF_RAID1	0x01
#define DDF_RAID3	0x03
#define DDF_RAID4	0x04
#define DDF_RAID5	0x05
#define DDF_RAID1E	0x11
#define DDF_JBOD	0x0f
#define DDF_CONCAT	0x1f
#define DDF_RAID5E	0x15
#define DDF_RAID5EE	0x25
#define DDF_RAID6	0x06

/* Raid Level Qualifier (RLQ) — interpretation depends on the PRL */
#define DDF_RAID0_SIMPLE	0x00
#define DDF_RAID1_SIMPLE	0x00 /* just 2 devices in this plex */
#define DDF_RAID1_MULTI		0x01 /* exactly 3 devices in this plex */
#define DDF_RAID3_0		0x00 /* parity in first extent */
#define DDF_RAID3_N		0x01 /* parity in last extent */
#define DDF_RAID4_0		0x00 /* parity in first extent */
#define DDF_RAID4_N		0x01 /* parity in last extent */
/* these apply to raid5e and raid5ee as well */
#define DDF_RAID5_0_RESTART	0x00 /* same as 'right asymmetric' - layout 1 */
#define DDF_RAID6_0_RESTART	0x01 /* raid6 different from raid5 here!!! */
#define DDF_RAID5_N_RESTART	0x02 /* same as 'left asymmetric' - layout 0 */
#define DDF_RAID5_N_CONTINUE	0x03 /* same as 'left symmetric' - layout 2 */

#define DDF_RAID1E_ADJACENT	0x00 /* raid10 nearcopies==2 */
#define DDF_RAID1E_OFFSET	0x01 /* raid10 offsetcopies==2 */

/* Secondary RAID Level (SRL) — how the secondary level combines BVDs */
#define DDF_2STRIPED	0x00 /* This is weirder than RAID0 !! */
#define DDF_2MIRRORED	0x01
#define DDF_2CONCAT	0x02
#define DDF_2SPANNED	0x03 /* This is also weird - be careful */

/* Magic numbers identifying each on-disk metadata section */
#define DDF_HEADER_MAGIC	cpu_to_be32(0xDE11DE11)
#define DDF_CONTROLLER_MAGIC	cpu_to_be32(0xAD111111)
#define DDF_PHYS_RECORDS_MAGIC	cpu_to_be32(0x22222222)
#define DDF_PHYS_DATA_MAGIC	cpu_to_be32(0x33333333)
#define DDF_VIRT_RECORDS_MAGIC	cpu_to_be32(0xDDDDDDDD)
#define DDF_VD_CONF_MAGIC	cpu_to_be32(0xEEEEEEEE)
#define DDF_SPARE_ASSIGN_MAGIC	cpu_to_be32(0x55555555)
#define DDF_VU_CONF_MAGIC	cpu_to_be32(0x88888888)
#define DDF_VENDOR_LOG_MAGIC	cpu_to_be32(0x01dBEEF0)
#define DDF_BBM_LOG_MAGIC	cpu_to_be32(0xABADB10C)

#define DDF_GUID_LEN	24
#define DDF_REVISION_0	"01.00.00"
#define DDF_REVISION_2	"01.02.00"
152
/* The 512-byte DDF header.  Each disk carries several copies: the
 * 'anchor' in the very last sector, plus primary and secondary copies
 * at the LBAs the anchor points to.  All multi-byte fields are
 * big-endian (see the be* wrappers above).
 */
struct ddf_header {
	be32	magic;		/* DDF_HEADER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	be32	seq;		/* starts at '1' */
	be32	timestamp;
	__u8	openflag;
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	be64	primary_lba;
	be64	secondary_lba;
	__u8	type;		/* DDF_HEADER_ANCHOR/PRIMARY/SECONDARY */
	__u8	pad2[3];	/* 0xff */
	be32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	be64	workspace_lba;
	be16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	be16	max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
	be16	max_partitions; /* i.e. max num of configuration
				   record entries per disk */
	be16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	be16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* section offsets are in sectors relative to primary_lba or
	 * secondary_lba (whichever header is in use); lengths are in
	 * sectors too — see load_section() */
	be32	controller_section_offset;
	be32	controller_section_length;
	be32	phys_section_offset;
	be32	phys_section_length;
	be32	virt_section_offset;
	be32	virt_section_length;
	be32	config_section_offset;
	be32	config_section_length;
	be32	data_section_offset;
	be32	data_section_length;
	be32	bbm_section_offset;
	be32	bbm_section_length;
	be32	diag_space_offset;
	be32	diag_space_length;
	be32	vendor_offset;
	be32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};

/* type field */
#define DDF_HEADER_ANCHOR	0x00
#define DDF_HEADER_PRIMARY	0x01
#define DDF_HEADER_SECONDARY	0x02
207
/* The content of the 'controller section' - global scope */
struct ddf_controller_data {
	be32	magic;			/* DDF_CONTROLLER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {
		be16 vendor_id;
		be16 device_id;
		be16 sub_vendor_id;
		be16 sub_device_id;
	} type;				/* PCI-style identification */
	char	product_id[16];
	__u8	pad[8];			/* 0xff */
	__u8	vendor_data[448];
};
223
/* The content of phys_section - global scope */
struct phys_disk {
	be32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	be32	crc;
	be16	used_pdes;	/* entries in use */
	be16	max_pdes;	/* capacity of entries[] */
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		be32	refnum;		/* matches disk_data.refnum */
		be16	type;		/* DDF_* bits below */
		be16	state;		/* DDF_Online etc. below */
		be64	config_size;	/* DDF structures must be after here */
		char	path[18];	/* another horrible structure really */
		__u8	pad[6];
	} entries[0];		/* trailing variable-length array */
};

/* phys_disk_entry.type is a bitmap - bigendian remember */
#define DDF_Forced_PD_GUID		1
#define DDF_Active_in_VD		2
#define DDF_Global_Spare		4 /* VD_CONF records are ignored */
#define DDF_Spare			8 /* overrides Global_spare */
#define DDF_Foreign			16
#define DDF_Legacy			32 /* no DDF on this device */

#define DDF_Interface_mask		0xf00
#define DDF_Interface_SCSI		0x100
#define DDF_Interface_SAS		0x200
#define DDF_Interface_SATA		0x300
#define DDF_Interface_FC		0x400

/* phys_disk_entry.state is a bigendian bitmap */
#define DDF_Online			1
#define DDF_Failed			2 /* overrides 1,4,8 */
#define DDF_Rebuilding			4
#define DDF_Transition			8
#define DDF_SMART			16
#define DDF_ReadErrors			32
#define DDF_Missing			64
264
/* The content of the virt_section global scope */
struct virtual_disk {
	be32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	be32	crc;
	be16	populated_vdes;	/* entries in use */
	be16	max_vdes;	/* capacity of entries[] */
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN]; /* all-0xff == unused slot */
		be16	unit;
		__u16	pad0;	/* 0xffff */
		be16	guid_crc;
		be16	type;	/* DDF_Shared etc. below */
		__u8	state;	/* DDF_state_* below */
		__u8	init_state; /* DDF_init* / DDF_access_* below */
		__u8	pad1[14];
		char	name[16];
	} entries[0];		/* trailing variable-length array */
};

/* virtual_entry.type is a bitmap - bigendian */
#define DDF_Shared		1
#define DDF_Enforce_Groups	2
#define DDF_Unicode		4
#define DDF_Owner_Valid		8

/* virtual_entry.state is a bigendian bitmap */
#define DDF_state_mask		0x7	/* low 3 bits are an enum ... */
#define DDF_state_optimal	0x0
#define DDF_state_degraded	0x1
#define DDF_state_deleted	0x2
#define DDF_state_missing	0x3
#define DDF_state_failed	0x4
#define DDF_state_part_optimal	0x5

#define DDF_state_morphing	0x8	/* ... upper bits are flags */
#define DDF_state_inconsistent	0x10

/* virtual_entry.init_state is a bigendian bitmap */
#define DDF_initstate_mask	0x03
#define DDF_init_not		0x00
#define DDF_init_quick		0x01 /* initialisation is progress.
				      * i.e. 'state_inconsistent' */
#define DDF_init_full		0x02

#define DDF_access_mask		0xc0
#define DDF_access_rw		0x00
#define DDF_access_ro		0x80
#define DDF_access_blocked	0xc0
314
/* The content of the config_section - local scope
 * It has multiple records each config_record_len sectors
 * They can be vd_config or spare_assign
 */

struct vd_config {
	be32	magic;		/* DDF_VD_CONF_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	timestamp;
	be32	seqnum;		/* higher seqnum wins when merging records */
	__u8	pad0[24];
	be16	prim_elmnt_count;
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* Primary RAID Level — DDF_RAID* */
	__u8	rlq;		/* RAID Level Qualifier */
	__u8	sec_elmnt_count; /* >1 means a two-level (hybrid) VD */
	__u8	sec_elmnt_seq;	/* which BVD of the hybrid this record is */
	__u8	srl;		/* Secondary RAID Level — DDF_2* */
	be64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	be32	spare_refs[8];
	__u8	cache_pol[8];	/* cache_pol[7] bits defined below */
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	be32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
/* The lba_offset array starts right after mppe phys_refnum slots;
 * it has no named field, so it is reached via this macro. */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])

/* vd_config.cache_pol[7] is a bitmap */
#define DDF_cache_writeback	1	/* else writethrough */
#define DDF_cache_wadaptive	2	/* only applies if writeback */
#define DDF_cache_readahead	4
#define DDF_cache_radaptive	8	/* only if doing read-ahead */
#define DDF_cache_ifnobatt	16	/* even to write cache if battery is poor */
#define DDF_cache_wallowed	32	/* enable write caching */
#define DDF_cache_rallowed	64	/* enable read caching */
364
/* A spare-assignment record in the config section: lists the VDs a
 * spare disk is dedicated to. */
struct spare_assign {
	be32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32	crc;
	be32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* DDF_spare_* bits below */
	be16	populated;	/* SAEs used */
	be16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];
		be16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];	/* trailing variable-length array */
};
/* spare_assign.type is a bitmap */
#define DDF_spare_dedicated	0x1	/* else global */
#define DDF_spare_revertible	0x2	/* else committable */
#define DDF_spare_active	0x4	/* else not active */
#define DDF_spare_affinity	0x8	/* enclosure affinity */
385
/* The data_section contents - local scope.
 * Identifies this particular physical disk within the DDF set. */
struct disk_data {
	be32	magic;		/* DDF_PHYS_DATA_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};

/* bbm_section content — bad-block remapping log */
struct bad_block_log {
	be32	magic;
	be32	crc;
	be16	entry_count;
	be32	spare_count;
	__u8	pad[10];
	be64	first_spare;
	struct mapped_block {
		be64	defective_start;
		be32	replacement_start;
		be16	remap_count;
		__u8	pad[2];
	} entries[0];		/* trailing variable-length array */
};
413
/* Struct for internally holding ddf structures */
/* The DDF structure stored on each device is potentially
 * quite different, as some data is global and some is local.
 * The global data is:
 *   - ddf header
 *   - controller_data
 *   - Physical disk records
 *   - Virtual disk records
 * The local data is:
 *   - Configuration records
 *   - Physical Disk data section
 *  ( and Bad block and vendor which I don't care about yet).
 *
 * The local data is parsed into separate lists as it is read
 * and reconstructed for writing.  This means that we only need
 * to make config changes once and they are automatically
 * propagated to all devices.
 * Note that the ddf_super has space of the conf and disk data
 * for this disk and also for a list of all such data.
 * The list is only used for the superblock that is being
 * built in Create or Assemble to describe the whole array.
 */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* &primary or &secondary */
	struct phys_disk	*phys;
	struct virtual_disk	*virt;
	int pdsize, vdsize;		/* byte sizes of *phys / *virt */
	/* limits cached (CPU byte order) from the active header */
	unsigned int max_part, mppe, conf_rec_len;
	int currentdev;
	int updates_pending;		/* set via ddf_set_updates_pending() */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				unsigned int	vcnum; /* index into ->virt */
				/* Other BVDs of a two-level VD; entries
				 * with sec_elmnt_seq == DDF_UNUSED_BVD
				 * are free — see alloc_other_bvds() */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char		*devname;
				int fd;
				unsigned long long size; /* sectors */
				be64	primary_lba; /* sectors */
				be64	secondary_lba; /* sectors */
				be64	workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
483
/* Fallback for pre-C89-stdlib environments without <stddef.h>'s offsetof */
#ifndef offsetof
#define offsetof(t,f) ((size_t)&(((t*)0)->f))
#endif

#if DEBUG
static int all_ff(const char *guid);
/* Debug trace: dump state/init_state of every populated virtual-disk
 * entry, prefixed with the caller-supplied message. */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		/* an all-0xff GUID marks an unused slot */
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
/* No-op when not debugging */
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
506
/* Mark the in-memory metadata dirty and bump the active header's
 * sequence number.  Callers use the wrapper macro below so the
 * DEBUG trace records which function triggered the update. */
static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
{
	ddf->updates_pending = 1;
	ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
	pr_state(ddf, func);
}

#define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)

static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
					     be32 refnum, unsigned int nmax,
					     const struct vd_config **bvd,
					     unsigned int *idx);
520
521 static be32 calc_crc(void *buf, int len)
522 {
523 /* crcs are always at the same place as in the ddf_header */
524 struct ddf_header *ddf = buf;
525 be32 oldcrc = ddf->crc;
526 __u32 newcrc;
527 ddf->crc = cpu_to_be32(0xffffffff);
528
529 newcrc = crc32(0, buf, len);
530 ddf->crc = oldcrc;
531 /* The crc is store (like everything) bigendian, so convert
532 * here for simplicity
533 */
534 return cpu_to_be32(newcrc);
535 }
536
#define DDF_INVALID_LEVEL 0xff	/* 'prl' value meaning "not translatable" */
#define DDF_NO_SECONDARY 0xff	/* 'srl' value meaning "no secondary level" */
/* Diagnose an md level/layout/disk-count combination that has no DDF
 * representation.  Always returns -1 so callers can simply
 * 'return err_bad_md_layout(array);'. */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return -1;
}
545
546 static int layout_md2ddf(const mdu_array_info_t *array,
547 struct vd_config *conf)
548 {
549 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
550 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
551 __u8 sec_elmnt_count = 1;
552 __u8 srl = DDF_NO_SECONDARY;
553
554 switch (array->level) {
555 case LEVEL_LINEAR:
556 prl = DDF_CONCAT;
557 break;
558 case 0:
559 rlq = DDF_RAID0_SIMPLE;
560 prl = DDF_RAID0;
561 break;
562 case 1:
563 switch (array->raid_disks) {
564 case 2:
565 rlq = DDF_RAID1_SIMPLE;
566 break;
567 case 3:
568 rlq = DDF_RAID1_MULTI;
569 break;
570 default:
571 return err_bad_md_layout(array);
572 }
573 prl = DDF_RAID1;
574 break;
575 case 4:
576 if (array->layout != 0)
577 return err_bad_md_layout(array);
578 rlq = DDF_RAID4_N;
579 prl = DDF_RAID4;
580 break;
581 case 5:
582 switch (array->layout) {
583 case ALGORITHM_LEFT_ASYMMETRIC:
584 rlq = DDF_RAID5_N_RESTART;
585 break;
586 case ALGORITHM_RIGHT_ASYMMETRIC:
587 rlq = DDF_RAID5_0_RESTART;
588 break;
589 case ALGORITHM_LEFT_SYMMETRIC:
590 rlq = DDF_RAID5_N_CONTINUE;
591 break;
592 case ALGORITHM_RIGHT_SYMMETRIC:
593 /* not mentioned in standard */
594 default:
595 return err_bad_md_layout(array);
596 }
597 prl = DDF_RAID5;
598 break;
599 case 6:
600 switch (array->layout) {
601 case ALGORITHM_ROTATING_N_RESTART:
602 rlq = DDF_RAID5_N_RESTART;
603 break;
604 case ALGORITHM_ROTATING_ZERO_RESTART:
605 rlq = DDF_RAID6_0_RESTART;
606 break;
607 case ALGORITHM_ROTATING_N_CONTINUE:
608 rlq = DDF_RAID5_N_CONTINUE;
609 break;
610 default:
611 return err_bad_md_layout(array);
612 }
613 prl = DDF_RAID6;
614 break;
615 case 10:
616 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
617 rlq = DDF_RAID1_SIMPLE;
618 prim_elmnt_count = cpu_to_be16(2);
619 sec_elmnt_count = array->raid_disks / 2;
620 } else if (array->raid_disks % 3 == 0
621 && array->layout == 0x103) {
622 rlq = DDF_RAID1_MULTI;
623 prim_elmnt_count = cpu_to_be16(3);
624 sec_elmnt_count = array->raid_disks / 3;
625 } else
626 return err_bad_md_layout(array);
627 srl = DDF_2SPANNED;
628 prl = DDF_RAID1;
629 break;
630 default:
631 return err_bad_md_layout(array);
632 }
633 conf->prl = prl;
634 conf->prim_elmnt_count = prim_elmnt_count;
635 conf->rlq = rlq;
636 conf->srl = srl;
637 conf->sec_elmnt_count = sec_elmnt_count;
638 return 0;
639 }
640
/* Diagnose a DDF PRL/RLQ/disk-count combination that md cannot
 * express.  Always returns -1 so callers can simply
 * 'return err_bad_ddf_layout(conf);'. */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
647
648 static int layout_ddf2md(const struct vd_config *conf,
649 mdu_array_info_t *array)
650 {
651 int level = LEVEL_UNSUPPORTED;
652 int layout = 0;
653 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
654
655 if (conf->sec_elmnt_count > 1) {
656 /* see also check_secondary() */
657 if (conf->prl != DDF_RAID1 ||
658 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
659 pr_err("Unsupported secondary RAID level %u/%u\n",
660 conf->prl, conf->srl);
661 return -1;
662 }
663 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
664 layout = 0x102;
665 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
666 layout = 0x103;
667 else
668 return err_bad_ddf_layout(conf);
669 raiddisks *= conf->sec_elmnt_count;
670 level = 10;
671 goto good;
672 }
673
674 switch (conf->prl) {
675 case DDF_CONCAT:
676 level = LEVEL_LINEAR;
677 break;
678 case DDF_RAID0:
679 if (conf->rlq != DDF_RAID0_SIMPLE)
680 return err_bad_ddf_layout(conf);
681 level = 0;
682 break;
683 case DDF_RAID1:
684 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
685 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
686 return err_bad_ddf_layout(conf);
687 level = 1;
688 break;
689 case DDF_RAID4:
690 if (conf->rlq != DDF_RAID4_N)
691 return err_bad_ddf_layout(conf);
692 level = 4;
693 break;
694 case DDF_RAID5:
695 switch (conf->rlq) {
696 case DDF_RAID5_N_RESTART:
697 layout = ALGORITHM_LEFT_ASYMMETRIC;
698 break;
699 case DDF_RAID5_0_RESTART:
700 layout = ALGORITHM_RIGHT_ASYMMETRIC;
701 break;
702 case DDF_RAID5_N_CONTINUE:
703 layout = ALGORITHM_LEFT_SYMMETRIC;
704 break;
705 default:
706 return err_bad_ddf_layout(conf);
707 }
708 level = 5;
709 break;
710 case DDF_RAID6:
711 switch (conf->rlq) {
712 case DDF_RAID5_N_RESTART:
713 layout = ALGORITHM_ROTATING_N_RESTART;
714 break;
715 case DDF_RAID6_0_RESTART:
716 layout = ALGORITHM_ROTATING_ZERO_RESTART;
717 break;
718 case DDF_RAID5_N_CONTINUE:
719 layout = ALGORITHM_ROTATING_N_CONTINUE;
720 break;
721 default:
722 return err_bad_ddf_layout(conf);
723 }
724 level = 6;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 };
729
730 good:
731 array->level = level;
732 array->layout = layout;
733 array->raid_disks = raiddisks;
734 return 0;
735 }
736
/* Read a primary or secondary DDF header from fd at sector 'lba' and
 * validate it against the already-loaded anchor.
 * 'size' is the device size in sectors; 'type' is the expected
 * DDF_HEADER_PRIMARY/SECONDARY value.
 * Returns 1 if the header is valid and consistent, 0 otherwise.
 */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *   everything after header_type must be the same
	 */
	/* the header must fit before the anchor in the last sector */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* everything from pad2 (just after 'type') to the end must be
	 * identical to the anchor's copy */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
779
/* Read one metadata section of 'len_be' sectors located 'offset_be'
 * sectors past the active (primary or secondary) header LBA.
 * If 'buf' is NULL a sector-aligned buffer is allocated (the caller
 * frees it); otherwise 'buf' must be a pre-allocated single sector.
 * With 'check' set, 'len' must be one of the section sizes the DDF
 * header permits.  Returns the buffer on success, NULL on failure
 * (an allocated buffer is freed on the error paths).
 */
static void *load_section(int fd, struct ddf_super *super, void *buf,
			  be32 offset_be, be32 len_be, int check)
{
	unsigned long long offset = be32_to_cpu(offset_be);
	unsigned long long len = be32_to_cpu(len_be);
	int dofree = (buf == NULL);

	if (check)
		if (len != 2 && len != 8 && len != 32
		    && len != 128 && len != 512)
			return NULL;

	/* sanity cap: no section is larger than 1024 sectors */
	if (len > 1024)
		return NULL;
	if (buf) {
		/* All pre-allocated sections are a single block */
		if (len != 1)
			return NULL;
	} else if (posix_memalign(&buf, 512, len<<9) != 0)
		buf = NULL;

	if (!buf)
		return NULL;

	/* section offsets are relative to whichever header is active */
	if (super->active->type == 1)
		offset += be64_to_cpu(super->active->primary_lba);
	else
		offset += be64_to_cpu(super->active->secondary_lba);

	if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	return buf;
}
821
/* Read the anchor header from the last sector of the device, validate
 * it (magic, CRC, revision), then load the primary and secondary
 * headers it points to and choose one as super->active.
 * Returns 0 on success, 1 on I/O error reading the anchor, 2 when no
 * usable DDF metadata is found.
 */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	/* the anchor is always the very last 512-byte sector */
	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	super->active = NULL;
	if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
	} else
		super->active = &super->primary;

	if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		/* prefer the secondary copy when the primary is missing,
		 * when the secondary is strictly newer and was closed
		 * cleanly, or when both have the same sequence number
		 * but only the primary was left open */
		if (super->active == NULL
		    || (be32_to_cpu(super->primary.seq)
			< be32_to_cpu(super->secondary.seq) &&
			!super->secondary.openflag)
		    || (be32_to_cpu(super->primary.seq)
			== be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
			)
			super->active = &super->secondary;
	} else if (devname)
		pr_err("Failed to load secondary DDF header on %s\n",
		       devname);
	if (super->active == NULL)
		return 2;
	return 0;
}
889
/* Load the globally-scoped sections (controller data, physical disk
 * records, virtual disk records) via the active header, and cache the
 * commonly-used header limits in CPU byte order.
 * Returns 0 on success, 2 if any section could not be read (phys/virt
 * allocations are freed and NULLed in that case).
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	/* controller data goes into pre-allocated space inside *super */
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
925
926 #define DDF_UNUSED_BVD 0xff
927 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
928 {
929 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
930 unsigned int i, vdsize;
931 void *p;
932 if (n_vds == 0) {
933 vcl->other_bvds = NULL;
934 return 0;
935 }
936 vdsize = ddf->conf_rec_len * 512;
937 if (posix_memalign(&p, 512, n_vds *
938 (vdsize + sizeof(struct vd_config *))) != 0)
939 return -1;
940 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
941 for (i = 0; i < n_vds; i++) {
942 vcl->other_bvds[i] = p + i * vdsize;
943 memset(vcl->other_bvds[i], 0, vdsize);
944 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
945 }
946 return 0;
947 }
948
949 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
950 unsigned int len)
951 {
952 int i;
953 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
954 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
955 break;
956
957 if (i < vcl->conf.sec_elmnt_count-1) {
958 if (be32_to_cpu(vd->seqnum) <=
959 be32_to_cpu(vcl->other_bvds[i]->seqnum))
960 return;
961 } else {
962 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
963 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
964 break;
965 if (i == vcl->conf.sec_elmnt_count-1) {
966 pr_err("no space for sec level config %u, count is %u\n",
967 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
968 return;
969 }
970 }
971 memcpy(vcl->other_bvds[i], vd, len);
972 }
973
/* Load the locally-scoped metadata for one physical disk: the
 * disk_data section (into a new 'struct dl' linked onto
 * super->dlist) and the config section, whose vd_config records are
 * de-duplicated by GUID into super->conflist (highest seqnum wins;
 * records for other BVDs of a two-level VD go to other_bvds).
 * Spare-assignment records are kept on dl->spare.
 * With 'keep' set the fd is retained in dl->fd.
 * Returns 0 on success, 1 on allocation failure.
 *
 * NOTE(review): the error paths below return 1 without freeing 'dl'
 * (and, in the later ones, 'conf') — leaked on failure; presumably
 * callers treat failure as fatal, verify.
 */
static int load_ddf_local(int fd, struct ddf_super *super,
			  char *devname, int keep)
{
	struct dl *dl;
	struct stat stb;
	char *conf;
	unsigned int i;
	unsigned int confsec;
	int vnum;
	unsigned int max_virt_disks = be16_to_cpu
		(super->active->max_vd_entries);
	unsigned long long dsize;

	/* First the local disk info */
	/* dl is allocated with trailing space for max_part vcl pointers */
	if (posix_memalign((void**)&dl, 512,
			   sizeof(*dl) +
			   (super->max_part) * sizeof(dl->vlist[0])) != 0) {
		pr_err("%s could not allocate disk info buffer\n",
		       __func__);
		return 1;
	}

	load_section(fd, super, &dl->disk,
		     super->active->data_section_offset,
		     super->active->data_section_length,
		     0);
	dl->devname = devname ? xstrdup(devname) : NULL;

	fstat(fd, &stb);
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->dlist;
	dl->fd = keep ? fd : -1;

	dl->size = 0;
	if (get_dev_size(fd, devname, &dsize))
		dl->size = dsize >> 9;
	/* If the disks have different sizes, the LBAs will differ
	 * between phys disks.
	 * At this point here, the values in super->active must be valid
	 * for this phys disk. */
	dl->primary_lba = super->active->primary_lba;
	dl->secondary_lba = super->active->secondary_lba;
	dl->workspace_lba = super->active->workspace_lba;
	dl->spare = NULL;
	for (i = 0 ; i < super->max_part ; i++)
		dl->vlist[i] = NULL;
	super->dlist = dl;
	/* find this disk's slot in the global phys records by GUID */
	dl->pdnum = -1;
	for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
		if (memcmp(super->phys->entries[i].guid,
			   dl->disk.guid, DDF_GUID_LEN) == 0)
			dl->pdnum = i;

	/* Now the config list. */
	/* 'conf' is an array of config entries, some of which are
	 * probably invalid.  Those which are good need to be copied into
	 * the conflist
	 */
	/* NOTE(review): load_section() can return NULL here; the loop
	 * below would then dereference a NULL-based pointer when the
	 * section length is non-zero — confirm this cannot happen. */
	conf = load_section(fd, super, NULL,
			    super->active->config_section_offset,
			    super->active->config_section_length,
			    0);

	vnum = 0;
	/* confsec counts in units of conf_rec_len sectors */
	for (confsec = 0;
	     confsec < be32_to_cpu(super->active->config_section_length);
	     confsec += super->conf_rec_len) {
		struct vd_config *vd =
			(struct vd_config *)((char*)conf + confsec*512);
		struct vcl *vcl;

		if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
			/* only the first spare-assignment record is kept */
			if (dl->spare)
				continue;
			if (posix_memalign((void**)&dl->spare, 512,
					   super->conf_rec_len*512) != 0) {
				pr_err("%s could not allocate spare info buf\n",
				       __func__);
				return 1;
			}

			memcpy(dl->spare, vd, super->conf_rec_len*512);
			continue;
		}
		if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			continue;
		/* is this VD already on the conflist? */
		for (vcl = super->conflist; vcl; vcl = vcl->next) {
			if (memcmp(vcl->conf.guid,
				   vd->guid, DDF_GUID_LEN) == 0)
				break;
		}

		if (vcl) {
			dl->vlist[vnum++] = vcl;
			/* a record for another BVD of this VD goes to
			 * the other_bvds array, not the main conf */
			if (vcl->other_bvds != NULL &&
			    vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
				add_other_bvd(vcl, vd, super->conf_rec_len*512);
				continue;
			}
			/* keep only the newest copy of the record */
			if (be32_to_cpu(vd->seqnum) <=
			    be32_to_cpu(vcl->conf.seqnum))
				continue;
		} else {
			if (posix_memalign((void**)&vcl, 512,
					   (super->conf_rec_len*512 +
					    offsetof(struct vcl, conf))) != 0) {
				pr_err("%s could not allocate vcl buf\n",
				       __func__);
				return 1;
			}
			vcl->next = super->conflist;
			vcl->block_sizes = NULL; /* FIXME not for CONCAT */
			vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
			if (alloc_other_bvds(super, vcl) != 0) {
				pr_err("%s could not allocate other bvds\n",
				       __func__);
				free(vcl);
				return 1;
			};
			super->conflist = vcl;
			dl->vlist[vnum++] = vcl;
		}
		memcpy(&vcl->conf, vd, super->conf_rec_len*512);
		/* map this config record to its virtual-disk entry */
		for (i=0; i < max_virt_disks ; i++)
			if (memcmp(super->virt->entries[i].guid,
				   vcl->conf.guid, DDF_GUID_LEN)==0)
				break;
		if (i < max_virt_disks)
			vcl->vcnum = i;
	}
	free(conf);

	return 0;
}
1110
1111 #ifndef MDASSEMBLE
1112 static int load_super_ddf_all(struct supertype *st, int fd,
1113 void **sbp, char *devname);
1114 #endif
1115
1116 static void free_super_ddf(struct supertype *st);
1117
1118 static int load_super_ddf(struct supertype *st, int fd,
1119 char *devname)
1120 {
1121 unsigned long long dsize;
1122 struct ddf_super *super;
1123 int rv;
1124
1125 if (get_dev_size(fd, devname, &dsize) == 0)
1126 return 1;
1127
1128 if (!st->ignore_hw_compat && test_partition(fd))
1129 /* DDF is not allowed on partitions */
1130 return 1;
1131
1132 /* 32M is a lower bound */
1133 if (dsize <= 32*1024*1024) {
1134 if (devname)
1135 pr_err("%s is too small for ddf: "
1136 "size is %llu sectors.\n",
1137 devname, dsize>>9);
1138 return 1;
1139 }
1140 if (dsize & 511) {
1141 if (devname)
1142 pr_err("%s is an odd size for ddf: "
1143 "size is %llu bytes.\n",
1144 devname, dsize);
1145 return 1;
1146 }
1147
1148 free_super_ddf(st);
1149
1150 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1151 pr_err("malloc of %zu failed.\n",
1152 sizeof(*super));
1153 return 1;
1154 }
1155 memset(super, 0, sizeof(*super));
1156
1157 rv = load_ddf_headers(fd, super, devname);
1158 if (rv) {
1159 free(super);
1160 return rv;
1161 }
1162
1163 /* Have valid headers and have chosen the best. Let's read in the rest*/
1164
1165 rv = load_ddf_global(fd, super, devname);
1166
1167 if (rv) {
1168 if (devname)
1169 pr_err("Failed to load all information "
1170 "sections on %s\n", devname);
1171 free(super);
1172 return rv;
1173 }
1174
1175 rv = load_ddf_local(fd, super, devname, 0);
1176
1177 if (rv) {
1178 if (devname)
1179 pr_err("Failed to load all information "
1180 "sections on %s\n", devname);
1181 free(super);
1182 return rv;
1183 }
1184
1185 /* Should possibly check the sections .... */
1186
1187 st->sb = super;
1188 if (st->ss == NULL) {
1189 st->ss = &super_ddf;
1190 st->minor_version = 0;
1191 st->max_devs = 512;
1192 }
1193 return 0;
1194
1195 }
1196
1197 static void free_super_ddf(struct supertype *st)
1198 {
1199 struct ddf_super *ddf = st->sb;
1200 if (ddf == NULL)
1201 return;
1202 free(ddf->phys);
1203 free(ddf->virt);
1204 while (ddf->conflist) {
1205 struct vcl *v = ddf->conflist;
1206 ddf->conflist = v->next;
1207 if (v->block_sizes)
1208 free(v->block_sizes);
1209 if (v->other_bvds)
1210 /*
1211 v->other_bvds[0] points to beginning of buffer,
1212 see alloc_other_bvds()
1213 */
1214 free(v->other_bvds[0]);
1215 free(v);
1216 }
1217 while (ddf->dlist) {
1218 struct dl *d = ddf->dlist;
1219 ddf->dlist = d->next;
1220 if (d->fd >= 0)
1221 close(d->fd);
1222 if (d->spare)
1223 free(d->spare);
1224 free(d);
1225 }
1226 while (ddf->add_list) {
1227 struct dl *d = ddf->add_list;
1228 ddf->add_list = d->next;
1229 if (d->fd >= 0)
1230 close(d->fd);
1231 if (d->spare)
1232 free(d->spare);
1233 free(d);
1234 }
1235 free(ddf);
1236 st->sb = NULL;
1237 }
1238
1239 static struct supertype *match_metadata_desc_ddf(char *arg)
1240 {
1241 /* 'ddf' only support containers */
1242 struct supertype *st;
1243 if (strcmp(arg, "ddf") != 0 &&
1244 strcmp(arg, "default") != 0
1245 )
1246 return NULL;
1247
1248 st = xcalloc(1, sizeof(*st));
1249 st->ss = &super_ddf;
1250 st->max_devs = 512;
1251 st->minor_version = 0;
1252 st->sb = NULL;
1253 return st;
1254 }
1255
1256 #ifndef MDASSEMBLE
1257
/* Human-readable names for the 3-bit virtual-disk state field
 * (values 0-7; 6 and 7 are reserved by the DDF spec). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* Names for the VD init_state field (masked with DDF_initstate_mask). */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the VD access policy (init_state bits masked with
 * DDF_access_mask, shifted down before lookup). */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Primary RAID level (PRL) byte to name mapping. */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Secondary RAID level (SRL) byte to name mapping. */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1306 #endif
1307
1308 static int all_ff(const char *guid)
1309 {
1310 int i;
1311 for (i = 0; i < DDF_GUID_LEN; i++)
1312 if (guid[i] != (char)0xff)
1313 return 0;
1314 return 1;
1315 }
1316
1317 static const char *guid_str(const char *guid)
1318 {
1319 static char buf[DDF_GUID_LEN*2+1];
1320 int i;
1321 char *p = buf;
1322 for (i = 0; i < DDF_GUID_LEN; i++) {
1323 unsigned char c = guid[i];
1324 if (c >= 32 && c < 127)
1325 p += sprintf(p, "%c", c);
1326 else
1327 p += sprintf(p, "%02x", c);
1328 }
1329 *p = '\0';
1330 return (const char *) buf;
1331 }
1332
1333 #ifndef MDASSEMBLE
1334 static void print_guid(char *guid, int tstamp)
1335 {
1336 /* A GUIDs are part (or all) ASCII and part binary.
1337 * They tend to be space padded.
1338 * We print the GUID in HEX, then in parentheses add
1339 * any initial ASCII sequence, and a possible
1340 * time stamp from bytes 16-19
1341 */
1342 int l = DDF_GUID_LEN;
1343 int i;
1344
1345 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1346 if ((i&3)==0 && i != 0) printf(":");
1347 printf("%02X", guid[i]&255);
1348 }
1349
1350 printf("\n (");
1351 while (l && guid[l-1] == ' ')
1352 l--;
1353 for (i=0 ; i<l ; i++) {
1354 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1355 fputc(guid[i], stdout);
1356 else
1357 break;
1358 }
1359 if (tstamp) {
1360 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1361 char tbuf[100];
1362 struct tm *tm;
1363 tm = localtime(&then);
1364 strftime(tbuf, 100, " %D %T",tm);
1365 fputs(tbuf, stdout);
1366 }
1367 printf(")");
1368 }
1369
/* Print detailed --examine output for the virtual disk with the given
 * GUID: per-member physical disk indices, chunk size, RAID level(s)
 * and sizes.  'n' is the VD index, echoed in every label. */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		/* skip records that are corrupt or for a different VD */
		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->used_pdes);
			/* map this member's refnum to a phys-disk slot;
			 * j == cnt after the loop means "not found" */
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift 255 means "not applicable" for this level */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			/* SVD: show position within the secondary level */
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* sizes are stored in 512-byte sectors; /2 prints KiB */
		printf(" Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1418
/* Print the --examine summary for every populated virtual-disk entry,
 * delegating per-VD configuration detail to examine_vd(). */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	/* walk the whole table; unused slots have an all-0xff GUID */
	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		/* access policy lives in the top bits of init_state */
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1446
/* Print one --examine table row per used physical-disk entry:
 * index, refnum, size, matching device node (if known) and the
 * decoded type/state flag bits. */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->used_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 converts to KiB */
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* find the local device with this refnum, if any, so we
		 * can show a device path */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1497
/* Top-level --examine output for a DDF container: header fields,
 * then the virtual- and physical-disk tables. */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* a valid magic in the secondary header means a redundant copy
	 * of the metadata exists */
	printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
					       DDF_HEADER_MAGIC)
	       ?"yes" : "no");
	examine_vds(sb);
	examine_pds(sb);
}
1515
1516 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1517
1518 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1519 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1520
1521 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1522 {
1523 /*
1524 * Figure out the VD number for this supertype.
1525 * Returns DDF_CONTAINER for the container itself,
1526 * and DDF_NOTFOUND on error.
1527 */
1528 struct ddf_super *ddf = st->sb;
1529 struct mdinfo *sra;
1530 char *sub, *end;
1531 unsigned int vcnum;
1532
1533 if (*st->container_devnm == '\0')
1534 return DDF_CONTAINER;
1535
1536 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1537 if (!sra || sra->array.major_version != -1 ||
1538 sra->array.minor_version != -2 ||
1539 !is_subarray(sra->text_version))
1540 return DDF_NOTFOUND;
1541
1542 sub = strchr(sra->text_version + 1, '/');
1543 if (sub != NULL)
1544 vcnum = strtoul(sub + 1, &end, 10);
1545 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1546 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1547 return DDF_NOTFOUND;
1548
1549 return vcnum;
1550 }
1551
1552 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1553 {
1554 /* We just write a generic DDF ARRAY entry
1555 */
1556 struct mdinfo info;
1557 char nbuf[64];
1558 getinfo_super_ddf(st, &info, NULL);
1559 fname_from_uuid(st, &info, nbuf, ':');
1560
1561 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1562 }
1563
1564 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1565 {
1566 /* We just write a generic DDF ARRAY entry
1567 */
1568 struct ddf_super *ddf = st->sb;
1569 struct mdinfo info;
1570 unsigned int i;
1571 char nbuf[64];
1572 getinfo_super_ddf(st, &info, NULL);
1573 fname_from_uuid(st, &info, nbuf, ':');
1574
1575 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1576 struct virtual_entry *ve = &ddf->virt->entries[i];
1577 struct vcl vcl;
1578 char nbuf1[64];
1579 if (all_ff(ve->guid))
1580 continue;
1581 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1582 ddf->currentconf =&vcl;
1583 uuid_from_super_ddf(st, info.uuid);
1584 fname_from_uuid(st, &info, nbuf1, ':');
1585 printf("ARRAY container=%s member=%d UUID=%s\n",
1586 nbuf+5, i, nbuf1+5);
1587 }
1588 }
1589
1590 static void export_examine_super_ddf(struct supertype *st)
1591 {
1592 struct mdinfo info;
1593 char nbuf[64];
1594 getinfo_super_ddf(st, &info, NULL);
1595 fname_from_uuid(st, &info, nbuf, ':');
1596 printf("MD_METADATA=ddf\n");
1597 printf("MD_LEVEL=container\n");
1598 printf("MD_UUID=%s\n", nbuf+5);
1599 }
1600
/* Copy the DDF metadata region (anchor + primary + secondary headers
 * and sections) verbatim from fd 'from' to fd 'to' at the same device
 * offset.  Returns 0 on success, 1 on any I/O or validation failure.
 * Assumes both fds refer to devices of the same size — TODO confirm
 * callers guarantee this. */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* read and validate the anchor (last 512 bytes) */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* start copying from whichever of anchor/primary/secondary
	 * lies earliest on the device */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* copy in page-sized chunks through the aligned bounce buffer */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1662
/* Intentionally empty: DDF has no extra --detail output yet. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1672
/* Append " UUID=..." to a brief --detail line, using the container
 * UUID or, for a subarray, the member VD's GUID-derived UUID. */
static void brief_detail_super_ddf(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	struct ddf_super *ddf = st->sb;
	unsigned int vcnum = get_vd_num_of_subarray(st);
	/* only info.uuid is filled in; fname_from_uuid presumably reads
	 * just the uuid field — TODO confirm */
	if (vcnum == DDF_CONTAINER)
		uuid_from_super_ddf(st, info.uuid);
	else if (vcnum == DDF_NOTFOUND)
		return;
	else
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
1688 #endif
1689
1690 static int match_home_ddf(struct supertype *st, char *homehost)
1691 {
1692 /* It matches 'this' host if the controller is a
1693 * Linux-MD controller with vendor_data matching
1694 * the hostname
1695 */
1696 struct ddf_super *ddf = st->sb;
1697 unsigned int len;
1698
1699 if (!homehost)
1700 return 0;
1701 len = strlen(homehost);
1702
1703 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1704 len < sizeof(ddf->controller.vendor_data) &&
1705 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1706 ddf->controller.vendor_data[len] == 0);
1707 }
1708
1709 #ifndef MDASSEMBLE
1710 static int find_index_in_bvd(const struct ddf_super *ddf,
1711 const struct vd_config *conf, unsigned int n,
1712 unsigned int *n_bvd)
1713 {
1714 /*
1715 * Find the index of the n-th valid physical disk in this BVD
1716 */
1717 unsigned int i, j;
1718 for (i = 0, j = 0; i < ddf->mppe &&
1719 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1720 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1721 if (n == j) {
1722 *n_bvd = i;
1723 return 1;
1724 }
1725 j++;
1726 }
1727 }
1728 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1729 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1730 return 0;
1731 }
1732
1733 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1734 unsigned int n,
1735 unsigned int *n_bvd, struct vcl **vcl)
1736 {
1737 struct vcl *v;
1738
1739 for (v = ddf->conflist; v; v = v->next) {
1740 unsigned int nsec, ibvd = 0;
1741 struct vd_config *conf;
1742 if (inst != v->vcnum)
1743 continue;
1744 conf = &v->conf;
1745 if (conf->sec_elmnt_count == 1) {
1746 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1747 *vcl = v;
1748 return conf;
1749 } else
1750 goto bad;
1751 }
1752 if (v->other_bvds == NULL) {
1753 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1754 __func__, conf->sec_elmnt_count);
1755 goto bad;
1756 }
1757 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1758 if (conf->sec_elmnt_seq != nsec) {
1759 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1760 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1761 == nsec)
1762 break;
1763 }
1764 if (ibvd == conf->sec_elmnt_count)
1765 goto bad;
1766 conf = v->other_bvds[ibvd-1];
1767 }
1768 if (!find_index_in_bvd(ddf, conf,
1769 n - nsec*conf->sec_elmnt_count, n_bvd))
1770 goto bad;
1771 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1772 , __func__, n, *n_bvd, ibvd, inst);
1773 *vcl = v;
1774 return conf;
1775 }
1776 bad:
1777 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1778 return NULL;
1779 }
1780 #endif
1781
1782 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1783 {
1784 /* Find the entry in phys_disk which has the given refnum
1785 * and return it's index
1786 */
1787 unsigned int i;
1788 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1789 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1790 return i;
1791 return -1;
1792 }
1793
1794 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1795 {
1796 char buf[20];
1797 struct sha1_ctx ctx;
1798 sha1_init_ctx(&ctx);
1799 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1800 sha1_finish_ctx(&ctx, buf);
1801 memcpy(uuid, buf, 4*4);
1802 }
1803
1804 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1805 {
1806 /* The uuid returned here is used for:
1807 * uuid to put into bitmap file (Create, Grow)
1808 * uuid for backup header when saving critical section (Grow)
1809 * comparing uuids when re-adding a device into an array
1810 * In these cases the uuid required is that of the data-array,
1811 * not the device-set.
1812 * uuid to recognise same set when adding a missing device back
1813 * to an array. This is a uuid for the device-set.
1814 *
1815 * For each of these we can make do with a truncated
1816 * or hashed uuid rather than the original, as long as
1817 * everyone agrees.
1818 * In the case of SVD we assume the BVD is of interest,
1819 * though that might be the case if a bitmap were made for
1820 * a mirrored SVD - worry about that later.
1821 * So we need to find the VD configuration record for the
1822 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1823 * The first 16 bytes of the sha1 of these is used.
1824 */
1825 struct ddf_super *ddf = st->sb;
1826 struct vcl *vcl = ddf->currentconf;
1827 char *guid;
1828
1829 if (vcl)
1830 guid = vcl->conf.guid;
1831 else
1832 guid = ddf->anchor.guid;
1833 uuid_from_ddf_guid(guid, uuid);
1834 }
1835
1836 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1837
/* Fill 'info' (and optionally per-disk 'map') for the DDF container.
 * If a subarray is currently selected, delegate to the BVD variant. */
static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
	int map_disks = info->array.raid_disks;
	__u32 *cptr;

	if (ddf->currentconf) {
		getinfo_super_ddf_bvd(st, info, map);
		return;
	}
	memset(info, 0, sizeof(*info));

	info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	/* bytes 16-19 of the GUID hold the creation timestamp
	 * (1980-based, hence the DECADE correction) */
	cptr = (__u32 *)(ddf->anchor.guid + 16);
	info->array.ctime = DECADE + __be32_to_cpu(*cptr);

	info->array.utime = 0;
	info->array.chunk_size = 0;
	info->container_enough = 1;

	info->disk.major = 0;
	info->disk.minor = 0;
	if (ddf->dlist) {
		/* describe the first local disk in the container */
		info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
		info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);

		info->data_offset = be64_to_cpu(ddf->phys->
					  entries[info->disk.raid_disk].
					  config_size);
		info->component_size = ddf->dlist->size - info->data_offset;
	} else {
		info->disk.number = -1;
		info->disk.raid_disk = -1;
		// info->disk.raid_disk = find refnum in the table and use index;
	}
	info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);

	info->recovery_start = MaxSector;
	info->reshape_active = 0;
	info->recovery_blocked = 0;
	info->name[0] = 0;

	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "ddf");
	info->safe_mode_delay = 0;

	uuid_from_super_ddf(st, info->uuid);

	if (map) {
		int i;
		/* mark each phys-disk slot usable iff Online and not Failed */
		for (i = 0 ; i < map_disks; i++) {
			if (i < info->array.raid_disks &&
			    (be16_to_cpu(ddf->phys->entries[i].state)
			     & DDF_Online) &&
			    !(be16_to_cpu(ddf->phys->entries[i].state)
			      & DDF_Failed))
				map[i] = 1;
			else
				map[i] = 0;
		}
	}
}
1904
1905 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1906 {
1907 struct ddf_super *ddf = st->sb;
1908 struct vcl *vc = ddf->currentconf;
1909 int cd = ddf->currentdev;
1910 int n_prim;
1911 int j;
1912 struct dl *dl;
1913 int map_disks = info->array.raid_disks;
1914 __u32 *cptr;
1915 struct vd_config *conf;
1916
1917 memset(info, 0, sizeof(*info));
1918 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1919 return;
1920 info->array.md_minor = -1;
1921 cptr = (__u32 *)(vc->conf.guid + 16);
1922 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1923 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1924 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1925 info->custom_array_size = 0;
1926
1927 conf = &vc->conf;
1928 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1929 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1930 int ibvd = cd / n_prim - 1;
1931 cd %= n_prim;
1932 conf = vc->other_bvds[ibvd];
1933 }
1934
1935 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1936 info->data_offset =
1937 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1938 if (vc->block_sizes)
1939 info->component_size = vc->block_sizes[cd];
1940 else
1941 info->component_size = be64_to_cpu(conf->blocks);
1942 }
1943
1944 for (dl = ddf->dlist; dl ; dl = dl->next)
1945 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1946 break;
1947
1948 info->disk.major = 0;
1949 info->disk.minor = 0;
1950 info->disk.state = 0;
1951 if (dl) {
1952 info->disk.major = dl->major;
1953 info->disk.minor = dl->minor;
1954 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1955 * be16_to_cpu(conf->prim_elmnt_count);
1956 info->disk.number = dl->pdnum;
1957 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1958 }
1959
1960 info->container_member = ddf->currentconf->vcnum;
1961
1962 info->recovery_start = MaxSector;
1963 info->resync_start = 0;
1964 info->reshape_active = 0;
1965 info->recovery_blocked = 0;
1966 if (!(ddf->virt->entries[info->container_member].state
1967 & DDF_state_inconsistent) &&
1968 (ddf->virt->entries[info->container_member].init_state
1969 & DDF_initstate_mask)
1970 == DDF_init_full)
1971 info->resync_start = MaxSector;
1972
1973 uuid_from_super_ddf(st, info->uuid);
1974
1975 info->array.major_version = -1;
1976 info->array.minor_version = -2;
1977 sprintf(info->text_version, "/%s/%d",
1978 st->container_devnm,
1979 info->container_member);
1980 info->safe_mode_delay = 200;
1981
1982 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1983 info->name[16]=0;
1984 for(j=0; j<16; j++)
1985 if (info->name[j] == ' ')
1986 info->name[j] = 0;
1987
1988 if (map)
1989 for (j = 0; j < map_disks; j++) {
1990 map[j] = 0;
1991 if (j < info->array.raid_disks) {
1992 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1993 if (i >= 0 &&
1994 (be16_to_cpu(ddf->phys->entries[i].state)
1995 & DDF_Online) &&
1996 !(be16_to_cpu(ddf->phys->entries[i].state)
1997 & DDF_Failed))
1998 map[i] = 1;
1999 }
2000 }
2001 }
2002
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made.  For others, the return value is ignored.
	 * Update options are:
	 *  force-one / assemble / force-array: no metadata trickery is
	 *     needed for DDF — once the metadata is updated to activate
	 *     the array, all implied modifications just happen.
	 *  grow: array gained a device (currently linear only) - FIXME
	 *  resync: mark dirty so a resync will happen
	 *  homehost: lives in controller->vendor_data (when we are the
	 *     vendor) - not implemented here
	 *  name: lives in virtual_entry->name - not implemented here
	 *  _reshape_progress: reshape is not supported yet
	 * Not relevant for this metadata: sparc2.2, super-minor,
	 * summaries.
	 */
	int rv;

	if (strcmp(update, "grow") == 0)
		rv = 0;			/* FIXME: not implemented */
	else if (strcmp(update, "resync") == 0)
		rv = 0;			/* nothing recorded here yet */
	else if (strcmp(update, "_reshape_progress") == 0)
		rv = 0;			/* reshape unsupported */
	else if (strcmp(update, "assemble") == 0)
		rv = 0;			/* nothing to do, succeed */
	else if (strcmp(update, "homehost") == 0)
		rv = -1;		/* unsupported */
	else if (strcmp(update, "name") == 0)
		rv = -1;		/* unsupported */
	else
		rv = -1;		/* unknown update type */

	return rv;
}
2071
2072 static void make_header_guid(char *guid)
2073 {
2074 be32 stamp;
2075 /* Create a DDF Header of Virtual Disk GUID */
2076
2077 /* 24 bytes of fiction required.
2078 * first 8 are a 'vendor-id' - "Linux-MD"
2079 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2080 * Remaining 8 random number plus timestamp
2081 */
2082 memcpy(guid, T10, sizeof(T10));
2083 stamp = cpu_to_be32(0xdeadbeef);
2084 memcpy(guid+8, &stamp, 4);
2085 stamp = cpu_to_be32(0);
2086 memcpy(guid+12, &stamp, 4);
2087 stamp = cpu_to_be32(time(0) - DECADE);
2088 memcpy(guid+16, &stamp, 4);
2089 stamp._v32 = random32();
2090 memcpy(guid+20, &stamp, 4);
2091 }
2092
2093 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2094 {
2095 unsigned int i;
2096 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2097 if (all_ff(ddf->virt->entries[i].guid))
2098 return i;
2099 }
2100 return DDF_NOTFOUND;
2101 }
2102
2103 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2104 const char *name)
2105 {
2106 unsigned int i;
2107 if (name == NULL)
2108 return DDF_NOTFOUND;
2109 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2110 if (all_ff(ddf->virt->entries[i].guid))
2111 continue;
2112 if (!strncmp(name, ddf->virt->entries[i].name,
2113 sizeof(ddf->virt->entries[i].name)))
2114 return i;
2115 }
2116 return DDF_NOTFOUND;
2117 }
2118
2119 #ifndef MDASSEMBLE
2120 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2121 const char *guid)
2122 {
2123 unsigned int i;
2124 if (guid == NULL || all_ff(guid))
2125 return DDF_NOTFOUND;
2126 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2127 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2128 return i;
2129 return DDF_NOTFOUND;
2130 }
2131 #endif
2132
2133 static int init_super_ddf_bvd(struct supertype *st,
2134 mdu_array_info_t *info,
2135 unsigned long long size,
2136 char *name, char *homehost,
2137 int *uuid, unsigned long long data_offset);
2138
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *                    this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *                         leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *                         This just lists VDs, doesn't give details
	 *  Config records - describes the VDs that use this disk
	 *  DiskData  - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure (allocation failure or an
	 * unsupported request).
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	/* DDF puts metadata at fixed locations relative to the device end;
	 * a caller-chosen data offset cannot be honoured. */
	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* A superblock already attached means we are creating a member
	 * array (BVD) inside an existing container - delegate. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	ddf->dlist = NULL; /* no physical disks yet */
	ddf->conflist = NULL; /* No virtual disks yet */
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  To just set up the lengths.
	 *
	 */

	/* Anchor header: lives in the very last sector of each device.
	 * The LBA fields stay all-ones until actual device sizes are
	 * known (filled in by _write_super_to_disk()).  Pad fields are
	 * 0xff, the DDF convention for unused bytes. */
	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255;
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
	ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
	ddf->max_part = 64;
	ddf->mppe = 256;
	/* One header sector plus room for mppe refnums (4 bytes each) and
	 * LBA offsets (8 bytes each), rounded up to whole sectors. */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* controller section is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* Only certain section sizes (in sectors) are acceptable;
	 * anything else indicates a structure-size bug, so abort. */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* One config record per partition plus one extra (spare record). */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* Unused optional sections: length 0, offset all-ones. */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* Primary and secondary headers are copies of the anchor,
	 * differing only in type and openflag. */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id'  - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	/* Right-justify the hostname in the 24-byte GUID and space-fill
	 * the gap between the T10 vendor id and the hostname. */
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	/* Physical disk records: every slot starts unused (guid 0xff). */
	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	/* Virtual disk records: likewise, all entries unused to begin. */
	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
2360
/* Convert a chunk size in bytes to the DDF "chunk shift": the base-2
 * logarithm of the chunk size expressed in 512-byte sectors.
 */
static int chunk_to_shift(int chunksize)
{
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
2365
2366 #ifndef MDASSEMBLE
/* A contiguous run of sectors on a physical device. */
struct extent {
	unsigned long long start, size;
};

/* qsort() comparator ordering extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *left = av;
	const struct extent *right = bv;

	if (left->start == right->start)
		return 0;
	return left->start < right->start ? -1 : 1;
}
2380
2381 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2382 {
2383 /* find a list of used extents on the give physical device
2384 * (dnum) of the given ddf.
2385 * Return a malloced array of 'struct extent'
2386
2387 * FIXME ignore DDF_Legacy devices?
2388
2389 */
2390 struct extent *rv;
2391 int n = 0;
2392 unsigned int i;
2393 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2394
2395 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2396 return NULL;
2397
2398 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2399
2400 for (i = 0; i < ddf->max_part; i++) {
2401 const struct vd_config *bvd;
2402 unsigned int ibvd;
2403 struct vcl *v = dl->vlist[i];
2404 if (v == NULL ||
2405 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2406 &bvd, &ibvd) == DDF_NOTFOUND)
2407 continue;
2408 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2409 rv[n].size = be64_to_cpu(bvd->blocks);
2410 n++;
2411 }
2412 qsort(rv, n, sizeof(*rv), cmp_extent);
2413
2414 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2415 rv[n].size = 0;
2416 return rv;
2417 }
2418 #endif
2419
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 *
	 * Returns 1 on success, 0 on failure (duplicate name, no free
	 * virtual-disk slot, unsupported level/layout, or allocation
	 * failure).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	/* VD names must be unique within a container */
	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	/* NOTE(review): the CRC is taken over the container (anchor) GUID,
	 * not the new VD GUID - presumably intentional; confirm against
	 * the DDF spec. */
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* The name field is fixed-width, space-padded, not NUL-terminated,
	 * so strncpy without guaranteed termination is intended here. */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* Translate md level/layout into DDF terms; this also fills in
	 * prim_elmnt_count and sec_elmnt_count. */
	if (layout_md2ddf(info, vc) == -1 ||
		be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* DDF stores sizes in sectors; info->size is in KiB, hence *2 */
	vc->blocks = cpu_to_be64(info->size * 2);
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	/* no spares assigned yet - all refs are 'unset' */
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnums start 'unset' (0xff); the 8-byte-per-slot LBA offset
	 * table that follows them is zeroed */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* Secondary BVDs are copies of the first, differing only in
	 * their sequence number. */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2537
2538
2539 #ifndef MDASSEMBLE
2540 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2541
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device with-in the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Returns silently (no error indication) if the disk cannot be
	 * found, is not in-sync, or has no large-enough free extent.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	/* Locate the dl entry either by pre-recorded raid_disk (fd == -1)
	 * or by major/minor number. */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		/* Multi-BVD (SVD) array: pick the BVD this raid_disk
		 * belongs to and reduce the index to be BVD-relative. */
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* First-fit search of the gaps between used extents; the list
	 * from get_extents() is sorted and zero-size terminated. */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	/* Record where this component lives in the vd_config. */
	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* Attach the current conf to the first free slot in the
	 * device's per-partition list. */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* The disk is now an active array member, no longer a spare. */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf);
}
2636
2637 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2638 {
2639 unsigned int i;
2640 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2641 if (all_ff(ddf->phys->entries[i].guid))
2642 return i;
2643 }
2644 return DDF_NOTFOUND;
2645 }
2646
/* add a device to a container, either while creating it or while
 * expanding a pre-existing container
 */
static int add_to_super_ddf(struct supertype *st,
			    mdu_disk_info_t *dk, int fd, char *devname,
			    unsigned long long data_offset)
{
	/* Returns 0 on success, 1 on failure (no free slot, device too
	 * small, or allocation failure). */
	struct ddf_super *ddf = st->sb;
	struct dl *dd;
	time_t now;
	struct tm *tm;
	unsigned long long size;
	struct phys_disk_entry *pde;
	unsigned int n, i;
	struct stat stb;
	__u32 *tptr;

	/* If we are in the middle of creating a BVD, this add is for
	 * that array, not the container itself. */
	if (ddf->currentconf) {
		add_to_super_ddf_bvd(st, dk, fd, devname);
		return 0;
	}

	/* This is device numbered dk->number.  We need to create
	 * a phys_disk entry and a more detailed disk_data entry.
	 */
	fstat(fd, &stb);
	n = find_unused_pde(ddf);
	if (n == DDF_NOTFOUND) {
		pr_err("%s: No free slot in array, cannot add disk\n",
		       __func__);
		return 1;
	}
	pde = &ddf->phys->entries[n];
	get_dev_size(fd, NULL, &size);
	if (size <= 32*1024*1024) {
		pr_err("%s: device size must be at least 32MB\n",
		       __func__);
		return 1;
	}
	size >>= 9; /* bytes -> 512-byte sectors */

	/* dl is variable-sized: one vlist slot per possible partition */
	if (posix_memalign((void**)&dd, 512,
		           sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
		pr_err("%s could allocate buffer for new disk, aborting\n",
		       __func__);
		return 1;
	}
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->devname = devname;
	dd->fd = fd;
	dd->spare = NULL;

	dd->disk.magic = DDF_PHYS_DATA_MAGIC;
	now = time(0);
	tm = localtime(&now);
	/* fabricate a GUID: vendor id + date + two random words */
	sprintf(dd->disk.guid, "%8s%04d%02d%02d",
		T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
	tptr = (__u32 *)(dd->disk.guid + 16);
	*tptr++ = random32();
	*tptr = random32();

	do {
		/* Cannot be bothered finding a CRC of some irrelevant details*/
		dd->disk.refnum._v32 = random32();
		/* loop until the random refnum collides with no existing
		 * entry (i countdown reaches 0) */
		for (i = be16_to_cpu(ddf->active->max_pd_entries);
		     i > 0; i--)
			if (be32_eq(ddf->phys->entries[i-1].refnum,
				    dd->disk.refnum))
				break;
	} while (i > 0);

	dd->disk.forced_ref = 1;
	dd->disk.forced_guid = 1;
	memset(dd->disk.vendor, ' ', 32);
	memcpy(dd->disk.vendor, "Linux", 5);
	memset(dd->disk.pad, 0xff, 442);
	for (i = 0; i < ddf->max_part ; i++)
		dd->vlist[i] = NULL;

	dd->pdnum = n;

	if (st->update_tail) {
		/* Queue a single-entry phys_disk update for mdmon to
		 * apply, rather than modifying the metadata directly. */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(n);
		pde = &pd->entries[0];
		dd->mdupdate = pd;
	} else
		ddf->phys->used_pdes = cpu_to_be16(
			1 + be16_to_cpu(ddf->phys->used_pdes));

	memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
	pde->refnum = dd->disk.refnum;
	pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
	pde->state = cpu_to_be16(DDF_Online);
	dd->size = size;
	/*
	 * If there is already a device in dlist, try to reserve the same
	 * amount of workspace. Otherwise, use 32MB.
	 * We checked disk size above already.
	 */
#define __calc_lba(new, old, lba, mb) do { \
		unsigned long long dif; \
		if ((old) != NULL) \
			dif = (old)->size - be64_to_cpu((old)->lba); \
		else \
			dif = (new)->size; \
		if ((new)->size > dif) \
			(new)->lba = cpu_to_be64((new)->size - dif); \
		else \
			(new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
	} while (0)
	__calc_lba(dd, ddf->dlist, workspace_lba, 32);
	__calc_lba(dd, ddf->dlist, primary_lba, 16);
	__calc_lba(dd, ddf->dlist, secondary_lba, 32);
	pde->config_size = dd->workspace_lba;

	sprintf(pde->path, "%17.17s","Information: nil") ;
	memset(pde->pad, 0xff, 6);

	/* With an update_tail the disk goes on add_list for mdmon;
	 * otherwise it joins dlist immediately. */
	if (st->update_tail) {
		dd->next = ddf->add_list;
		ddf->add_list = dd;
	} else {
		dd->next = ddf->dlist;
		ddf->dlist = dd;
		ddf_set_updates_pending(ddf);
	}

	return 0;
}
2783
2784 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2785 {
2786 struct ddf_super *ddf = st->sb;
2787 struct dl *dl;
2788
2789 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2790 * disappeared from the container.
2791 * We need to arrange that it disappears from the metadata and
2792 * internal data structures too.
2793 * Most of the work is done by ddf_process_update which edits
2794 * the metadata and closes the file handle and attaches the memory
2795 * where free_updates will free it.
2796 */
2797 for (dl = ddf->dlist; dl ; dl = dl->next)
2798 if (dl->major == dk->major &&
2799 dl->minor == dk->minor)
2800 break;
2801 if (!dl)
2802 return -1;
2803
2804 if (st->update_tail) {
2805 int len = (sizeof(struct phys_disk) +
2806 sizeof(struct phys_disk_entry));
2807 struct phys_disk *pd;
2808
2809 pd = xmalloc(len);
2810 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2811 pd->used_pdes = cpu_to_be16(dl->pdnum);
2812 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2813 append_metadata_update(st, pd, len);
2814 }
2815 return 0;
2816 }
2817 #endif
2818
2819 /*
2820 * This is the write_init_super method for a ddf container. It is
2821 * called when creating a container or adding another device to a
2822 * container.
2823 */
2824 #define NULL_CONF_SZ 4096
2825
2826 static char *null_aligned;
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
				 int update)
{
	/* Write one complete copy of the DDF structure (header,
	 * controller, phys, virt, config records, disk data) to device
	 * 'd', anchored at either the primary or secondary header LBA
	 * depending on 'type'.  When 'update' is set, slots with no
	 * config record are skipped with a seek instead of being
	 * overwritten with 0xff filler.
	 * Returns 1 on success, 0 on any write failure.
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size;
	int ret = 0;

	/* lazily allocate a shared 0xff-filled buffer used to blank
	 * unused config-record slots */
	if (null_aligned == NULL) {
		if (posix_memalign((void **)&null_aligned, 4096, NULL_CONF_SZ)
		    != 0)
			return 0;
		memset(null_aligned, 0xff, NULL_CONF_SZ);
	}

	fd = d->fd;

	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}

	/* Mark the header 'open' while we write the sections; it is
	 * re-written 'closed' at the end. */
	header->type = type;
	header->openflag = 1;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		goto out;

	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		goto out;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		goto out;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		goto out;

	/* Now write lots of config records. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* the final slot holds the spare-assignment
			 * record, if any */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (c) {
			dprintf("writing conf record %i on disk %08x for %s/%u\n",
				i, be32_to_cpu(d->disk.refnum),
				guid_str(vdc->guid),
				vdc->sec_elmnt_seq);
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			if (write(fd, vdc, conf_size) < 0)
				break;
		} else if (!update) {
			/* blank the slot in NULL_CONF_SZ-sized chunks */
			unsigned int togo = conf_size;
			while (togo > NULL_CONF_SZ) {
				if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
					break;
				togo -= NULL_CONF_SZ;
			}
			if (write(fd, null_aligned, togo) < 0)
				break;
		} else
			lseek(fd, conf_size, SEEK_CUR);
	}
	/* an early 'break' above means a write failed part-way */
	if (i <= n_config)
		goto out;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		goto out;

	ret = 1;
out:
	/* Always rewrite the header with openflag cleared, even on
	 * failure, so the metadata is not left marked 'open'. */
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		ret = 0;

	return ret;
}
2935
static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d,
				int update)
{
	/* Write the full DDF metadata (primary copy, secondary copy,
	 * and anchor) to a single device 'd'.
	 * Returns 1 on success, 0 on failure or if the device has no
	 * open file descriptor.
	 */
	unsigned long long size;
	int fd = d->fd;
	if (fd < 0)
		return 0;

	/* We need to fill in the primary, (secondary) and workspace
	 * lba's in the headers, set their checksums,
	 * Also checksum phys, virt....
	 *
	 * Then write everything out, finally the anchor is written.
	 */
	get_dev_size(fd, NULL, &size);
	size /= 512; /* work in sectors from here on */
	/* Use per-device LBAs recorded at add time if set, otherwise
	 * fall back to the defaults: workspace and secondary 32MiB from
	 * the end, primary 16MiB from the end. */
	if (be64_to_cpu(d->workspace_lba) != 0ULL)
		ddf->anchor.workspace_lba = d->workspace_lba;
	else
		ddf->anchor.workspace_lba =
			cpu_to_be64(size - 32*1024*2);
	if (be64_to_cpu(d->primary_lba) != 0ULL)
		ddf->anchor.primary_lba = d->primary_lba;
	else
		ddf->anchor.primary_lba =
			cpu_to_be64(size - 16*1024*2);
	if (be64_to_cpu(d->secondary_lba) != 0ULL)
		ddf->anchor.secondary_lba = d->secondary_lba;
	else
		ddf->anchor.secondary_lba =
			cpu_to_be64(size - 32*1024*2);
	ddf->anchor.seq = ddf->active->seq;
	/* propagate the freshly filled-in LBAs to both working headers */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
	ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
	ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY, update))
		return 0;

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY, update))
		return 0;

	/* the anchor occupies the very last sector of the device */
	lseek64(fd, (size-1)*512, SEEK_SET);
	if (write(fd, &ddf->anchor, 512) < 0)
		return 0;

	return 1;
}
2987
2988 #ifndef MDASSEMBLE
2989 static int __write_init_super_ddf(struct supertype *st, int update)
2990 {
2991 struct ddf_super *ddf = st->sb;
2992 struct dl *d;
2993 int attempts = 0;
2994 int successes = 0;
2995
2996 pr_state(ddf, __func__);
2997
2998 /* try to write updated metadata,
2999 * if we catch a failure move on to the next disk
3000 */
3001 for (d = ddf->dlist; d; d=d->next) {
3002 attempts++;
3003 successes += _write_super_to_disk(ddf, d, update);
3004 }
3005
3006 return attempts != successes;
3007 }
3008
static int write_init_super_ddf(struct supertype *st)
{
	/* Entry point for writing freshly initialised metadata.  Under
	 * mdmon (st->update_tail set) this queues metadata updates
	 * instead of writing directly; otherwise it writes to every
	 * member device via __write_init_super_ddf().
	 * Returns 0 on success (or when updates were queued), non-zero
	 * when a direct write failed on some disk.
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *currentconf = ddf->currentconf;

	/* we are done with currentconf reset it to point st at the container */
	ddf->currentconf = NULL;

	if (st->update_tail) {
		/* queue the virtual_disk and vd_config as metadata updates */
		struct virtual_disk *vd;
		struct vd_config *vc;
		int len, tlen;
		unsigned int i;

		if (!currentconf) {
			/* NOTE: this 'len' intentionally shadows the outer
			 * declaration; it sizes a one-entry phys_disk. */
			int len = (sizeof(struct phys_disk) +
				   sizeof(struct phys_disk_entry));

			/* adding a disk to the container. */
			if (!ddf->add_list)
				return 0;

			append_metadata_update(st, ddf->add_list->mdupdate, len);
			ddf->add_list->mdupdate = NULL;
			return 0;
		}

		/* Newly created VD */

		/* First the virtual disk. We have a slightly fake header */
		len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
		vd = xmalloc(len);
		*vd = *ddf->virt;
		vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
		/* populated_vdes abused to carry the VD index in the update */
		vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
		append_metadata_update(st, vd, len);

		/* Then the vd_config */
		len = ddf->conf_rec_len * 512;
		/* one record per BVD, concatenated into a single update */
		tlen = len * currentconf->conf.sec_elmnt_count;
		vc = xmalloc(tlen);
		memcpy(vc, &currentconf->conf, len);
		for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
			memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
			       len);
		append_metadata_update(st, vc, tlen);

		/* FIXME I need to close the fds! */
		return 0;
	} else {
		struct dl *d;
		/* For a fresh container, scrub any old metadata off the
		 * member devices before writing ours. */
		if (!currentconf)
			for (d = ddf->dlist; d; d=d->next)
				while (Kill(d->devname, NULL, 0, -1, 1) == 0);
		return __write_init_super_ddf(st, 0);
	}
}
3067
3068 #endif
3069
3070 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3071 unsigned long long data_offset)
3072 {
3073 /* We must reserve the last 32Meg */
3074 if (devsize <= 32*1024*2)
3075 return 0;
3076 return devsize - 32*1024*2;
3077 }
3078
3079 #ifndef MDASSEMBLE
3080
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.  If they cannot be found, fail.
	 * On success the chosen devices are marked via dl->raiddisk,
	 * the chosen size is returned through *freesize, and 1 is
	 * returned; 0 on failure.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* reset any previous selection */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		/* size == 0 means "as large as possible"; require at
		 * least one chunk */
		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* walk the gaps between used extents, keeping the
		 * largest that satisfies 'minsize' */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* not enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		/* round the chosen size down to a whole number of chunks */
		if (chunk) {
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3167
3168 static int
3169 validate_geometry_ddf_container(struct supertype *st,
3170 int level, int layout, int raiddisks,
3171 int chunk, unsigned long long size,
3172 unsigned long long data_offset,
3173 char *dev, unsigned long long *freesize,
3174 int verbose);
3175
3176 static int validate_geometry_ddf_bvd(struct supertype *st,
3177 int level, int layout, int raiddisks,
3178 int *chunk, unsigned long long size,
3179 unsigned long long data_offset,
3180 char *dev, unsigned long long *freesize,
3181 int verbose);
3182
/* Top-level geometry check for DDF.
 * Decides, from what we were given, whether to validate a container,
 * a BVD inside an already-open container, or (with no device) to do a
 * pure capability/space check.
 * Returns 1 if the geometry is acceptable (possibly setting *freesize),
 * 0 otherwise.
 */
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	/* -1000000 is the "no level specified" sentinel used by callers */
	if (level == -1000000) level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, *chunk,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device given: just check that DDF can express this
		 * level/layout/disk-count combination at all.
		 */
		mdu_array_info_t array = {
			.level = level, .layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, *chunk, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there.  Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* Device opened exclusively: it is not in use by an array.
		 * NOTE(review): sra from sysfs_read() is not freed on this
		 * path — looks like a leak; sysfs_free(sra) would fix it.
		 */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {

			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container.  Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		/* NOTE(review): cfd (and sra) are not released on this
		 * branch — looks like a descriptor/memory leak; confirm. */
		return 0;

	/* sra matched "ddf" but the container could not be loaded;
	 * cfd has been closed above. */
	return 1;
}
3316
3317 static int
3318 validate_geometry_ddf_container(struct supertype *st,
3319 int level, int layout, int raiddisks,
3320 int chunk, unsigned long long size,
3321 unsigned long long data_offset,
3322 char *dev, unsigned long long *freesize,
3323 int verbose)
3324 {
3325 int fd;
3326 unsigned long long ldsize;
3327
3328 if (level != LEVEL_CONTAINER)
3329 return 0;
3330 if (!dev)
3331 return 1;
3332
3333 fd = open(dev, O_RDONLY|O_EXCL, 0);
3334 if (fd < 0) {
3335 if (verbose)
3336 pr_err("ddf: Cannot open %s: %s\n",
3337 dev, strerror(errno));
3338 return 0;
3339 }
3340 if (!get_dev_size(fd, dev, &ldsize)) {
3341 close(fd);
3342 return 0;
3343 }
3344 close(fd);
3345
3346 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3347 if (*freesize == 0)
3348 return 0;
3349
3350 return 1;
3351 }
3352
3353 static int validate_geometry_ddf_bvd(struct supertype *st,
3354 int level, int layout, int raiddisks,
3355 int *chunk, unsigned long long size,
3356 unsigned long long data_offset,
3357 char *dev, unsigned long long *freesize,
3358 int verbose)
3359 {
3360 struct stat stb;
3361 struct ddf_super *ddf = st->sb;
3362 struct dl *dl;
3363 unsigned long long pos = 0;
3364 unsigned long long maxsize;
3365 struct extent *e;
3366 int i;
3367 /* ddf/bvd supports lots of things, but not containers */
3368 if (level == LEVEL_CONTAINER) {
3369 if (verbose)
3370 pr_err("DDF cannot create a container within an container\n");
3371 return 0;
3372 }
3373 /* We must have the container info already read in. */
3374 if (!ddf)
3375 return 0;
3376
3377 if (!dev) {
3378 /* General test: make sure there is space for
3379 * 'raiddisks' device extents of size 'size'.
3380 */
3381 unsigned long long minsize = size;
3382 int dcnt = 0;
3383 if (minsize == 0)
3384 minsize = 8;
3385 for (dl = ddf->dlist; dl ; dl = dl->next)
3386 {
3387 int found = 0;
3388 pos = 0;
3389
3390 i = 0;
3391 e = get_extents(ddf, dl);
3392 if (!e) continue;
3393 do {
3394 unsigned long long esize;
3395 esize = e[i].start - pos;
3396 if (esize >= minsize)
3397 found = 1;
3398 pos = e[i].start + e[i].size;
3399 i++;
3400 } while (e[i-1].size);
3401 if (found)
3402 dcnt++;
3403 free(e);
3404 }
3405 if (dcnt < raiddisks) {
3406 if (verbose)
3407 pr_err("ddf: Not enough devices with "
3408 "space for this array (%d < %d)\n",
3409 dcnt, raiddisks);
3410 return 0;
3411 }
3412 return 1;
3413 }
3414 /* This device must be a member of the set */
3415 if (stat(dev, &stb) < 0)
3416 return 0;
3417 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3418 return 0;
3419 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3420 if (dl->major == (int)major(stb.st_rdev) &&
3421 dl->minor == (int)minor(stb.st_rdev))
3422 break;
3423 }
3424 if (!dl) {
3425 if (verbose)
3426 pr_err("ddf: %s is not in the "
3427 "same DDF set\n",
3428 dev);
3429 return 0;
3430 }
3431 e = get_extents(ddf, dl);
3432 maxsize = 0;
3433 i = 0;
3434 if (e) do {
3435 unsigned long long esize;
3436 esize = e[i].start - pos;
3437 if (esize >= maxsize)
3438 maxsize = esize;
3439 pos = e[i].start + e[i].size;
3440 i++;
3441 } while (e[i-1].size);
3442 *freesize = maxsize;
3443 // FIXME here I am
3444
3445 return 1;
3446 }
3447
/* Load the complete DDF metadata for the container open on 'fd'.
 * Each member device is probed; the one with the highest sequence
 * number provides the global data, then per-device local data is read
 * from every member.  On success *sbp receives the assembled
 * ddf_super and st is filled in.
 * Returns 0 on success, 1 or 2 on failure.
 */
static int load_super_ddf_all(struct supertype *st, int fd,
			      void **sbp, char *devname)
{
	struct mdinfo *sra;
	struct ddf_super *super;
	struct mdinfo *sd, *best = NULL;
	int bestseq = 0;
	int seq;
	char nm[20];
	int dfd;

	sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;
	/* major/minor -1/-2 plus text_version "ddf" marks an external
	 * DDF container in sysfs.
	 * NOTE(review): sra (and, below, super) are not freed on the
	 * error-return paths of this function — looks like leaks; confirm.
	 */
	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "ddf") != 0)
		return 1;

	/* 512-byte alignment so the buffer can be used for O_DIRECT-style
	 * sector I/O */
	if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
		return 1;
	memset(super, 0, sizeof(*super));

	/* first, try each device, and choose the best ddf */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDONLY);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		close(dfd);
		if (rv == 0) {
			/* openflag means the header was written while the
			 * array was open, so discount its sequence number */
			seq = be32_to_cpu(super->active->seq);
			if (super->active->openflag)
				seq--;
			if (!best || seq > bestseq) {
				bestseq = seq;
				best = sd;
			}
		}
	}
	if (!best)
		return 1;
	/* OK, load this ddf */
	sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
	dfd = dev_open(nm, O_RDONLY);
	if (dfd < 0)
		return 1;
	load_ddf_headers(dfd, super, NULL);
	load_ddf_global(dfd, super, NULL);
	close(dfd);
	/* Now we need the device-local bits */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;

		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDWR);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		if (rv == 0)
			rv = load_ddf_local(dfd, super, NULL, 1);
		/* dfd is not closed here — presumably load_ddf_local
		 * (called with keep=1) takes ownership of the descriptor;
		 * TODO confirm before "fixing". */
		if (rv)
			return 1;
	}

	*sbp = super;
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	strcpy(st->container_devnm, fd2devnm(fd));
	return 0;
}
3524
3525 static int load_container_ddf(struct supertype *st, int fd,
3526 char *devname)
3527 {
3528 return load_super_ddf_all(st, fd, &st->sb, devname);
3529 }
3530
3531 #endif /* MDASSEMBLE */
3532
3533 static int check_secondary(const struct vcl *vc)
3534 {
3535 const struct vd_config *conf = &vc->conf;
3536 int i;
3537
3538 /* The only DDF secondary RAID level md can support is
3539 * RAID 10, if the stripe sizes and Basic volume sizes
3540 * are all equal.
3541 * Other configurations could in theory be supported by exposing
3542 * the BVDs to user space and using device mapper for the secondary
3543 * mapping. So far we don't support that.
3544 */
3545
3546 __u64 sec_elements[4] = {0, 0, 0, 0};
3547 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3548 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3549
3550 if (vc->other_bvds == NULL) {
3551 pr_err("No BVDs for secondary RAID found\n");
3552 return -1;
3553 }
3554 if (conf->prl != DDF_RAID1) {
3555 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3556 return -1;
3557 }
3558 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3559 pr_err("Secondary RAID level %d is unsupported\n",
3560 conf->srl);
3561 return -1;
3562 }
3563 __set_sec_seen(conf->sec_elmnt_seq);
3564 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3565 const struct vd_config *bvd = vc->other_bvds[i];
3566 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3567 continue;
3568 if (bvd->srl != conf->srl) {
3569 pr_err("Inconsistent secondary RAID level across BVDs\n");
3570 return -1;
3571 }
3572 if (bvd->prl != conf->prl) {
3573 pr_err("Different RAID levels for BVDs are unsupported\n");
3574 return -1;
3575 }
3576 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3577 pr_err("All BVDs must have the same number of primary elements\n");
3578 return -1;
3579 }
3580 if (bvd->chunk_shift != conf->chunk_shift) {
3581 pr_err("Different strip sizes for BVDs are unsupported\n");
3582 return -1;
3583 }
3584 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3585 pr_err("Different BVD sizes are unsupported\n");
3586 return -1;
3587 }
3588 __set_sec_seen(bvd->sec_elmnt_seq);
3589 }
3590 for (i = 0; i < conf->sec_elmnt_count; i++) {
3591 if (!__was_sec_seen(i)) {
3592 pr_err("BVD %d is missing\n", i);
3593 return -1;
3594 }
3595 }
3596 return 0;
3597 }
3598
3599 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3600 be32 refnum, unsigned int nmax,
3601 const struct vd_config **bvd,
3602 unsigned int *idx)
3603 {
3604 unsigned int i, j, n, sec, cnt;
3605
3606 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3607 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3608
3609 for (i = 0, j = 0 ; i < nmax ; i++) {
3610 /* j counts valid entries for this BVD */
3611 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3612 j++;
3613 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3614 *bvd = &vc->conf;
3615 *idx = i;
3616 return sec * cnt + j - 1;
3617 }
3618 }
3619 if (vc->other_bvds == NULL)
3620 goto bad;
3621
3622 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3623 struct vd_config *vd = vc->other_bvds[n-1];
3624 sec = vd->sec_elmnt_seq;
3625 if (sec == DDF_UNUSED_BVD)
3626 continue;
3627 for (i = 0, j = 0 ; i < nmax ; i++) {
3628 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3629 j++;
3630 if (be32_eq(vd->phys_refnum[i], refnum)) {
3631 *bvd = vd;
3632 *idx = i;
3633 return sec * cnt + j - 1;
3634 }
3635 }
3636 }
3637 bad:
3638 *bvd = NULL;
3639 return DDF_NOTFOUND;
3640 }
3641
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 *
	 * If 'subarray' is non-NULL, only the VD whose vcnum matches
	 * that decimal string is emitted (and ddf->currentconf is left
	 * pointing at it for the caller's benefit).
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next)
	{
		unsigned int i;
		unsigned int j;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip VDs whose number doesn't match the requested
		 * subarray (strict decimal parse). */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Multi-BVD (secondary RAID) configs are only exposed
		 * when check_secondary() deems them md-compatible. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		/* Bytes 16.. of the GUID hold the creation timestamp;
		 * DDF times are 1980-based, DECADE converts to Unix time. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		/* Only a fully-initialised, consistent VD is reported
		 * as clean with resync complete. */
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		/* DDF names are space-padded; NUL-terminate and strip */
		memcpy(this->name, ddf->virt->entries[i].name, 16);
		this->name[16]=0;
		for(j=0; j<16; j++)
			if (this->name[j] == ' ')
				this->name[j] = 0;

		memset(this->uuid, 0, sizeof(this->uuid));
		this->component_size = be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() reads currentconf; it is reset
		 * afterwards unless the caller asked for this subarray. */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Attach a device entry for every online physical disk
		 * that participates in this VD. */
		for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* refnum 0xFFFFFFFF marks an unused PD slot */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->primary.seq);
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3772
3773 static int store_super_ddf(struct supertype *st, int fd)
3774 {
3775 struct ddf_super *ddf = st->sb;
3776 unsigned long long dsize;
3777 void *buf;
3778 int rc;
3779
3780 if (!ddf)
3781 return 1;
3782
3783 if (!get_dev_size(fd, NULL, &dsize))
3784 return 1;
3785
3786 if (ddf->dlist || ddf->conflist) {
3787 struct stat sta;
3788 struct dl *dl;
3789 int ofd, ret;
3790
3791 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3792 pr_err("%s: file descriptor for invalid device\n",
3793 __func__);
3794 return 1;
3795 }
3796 for (dl = ddf->dlist; dl; dl = dl->next)
3797 if (dl->major == (int)major(sta.st_rdev) &&
3798 dl->minor == (int)minor(sta.st_rdev))
3799 break;
3800 if (!dl) {
3801 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3802 (int)major(sta.st_rdev),
3803 (int)minor(sta.st_rdev));
3804 return 1;
3805 }
3806 ofd = dl->fd;
3807 dl->fd = fd;
3808 ret = (_write_super_to_disk(ddf, dl, 0) != 1);
3809 dl->fd = ofd;
3810 return ret;
3811 }
3812
3813 if (posix_memalign(&buf, 512, 512) != 0)
3814 return 1;
3815 memset(buf, 0, 512);
3816
3817 lseek64(fd, dsize-512, 0);
3818 rc = write(fd, buf, 512);
3819 free(buf);
3820 if (rc < 0)
3821 return 1;
3822 return 0;
3823 }
3824
3825 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3826 {
3827 /*
3828 * return:
3829 * 0 same, or first was empty, and second was copied
3830 * 1 second had wrong number
3831 * 2 wrong uuid
3832 * 3 wrong other info
3833 */
3834 struct ddf_super *first = st->sb;
3835 struct ddf_super *second = tst->sb;
3836 struct dl *dl1, *dl2;
3837 struct vcl *vl1, *vl2;
3838 unsigned int max_vds, max_pds, pd, vd;
3839
3840 if (!first) {
3841 st->sb = tst->sb;
3842 tst->sb = NULL;
3843 return 0;
3844 }
3845
3846 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3847 return 2;
3848
3849 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3850 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3851 be32_to_cpu(first->anchor.seq),
3852 be32_to_cpu(second->anchor.seq));
3853 return 3;
3854 }
3855 if (first->max_part != second->max_part ||
3856 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3857 !be16_eq(first->virt->populated_vdes,
3858 second->virt->populated_vdes)) {
3859 dprintf("%s: PD/VD number mismatch\n", __func__);
3860 return 3;
3861 }
3862
3863 max_pds = be16_to_cpu(first->phys->used_pdes);
3864 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3865 for (pd = 0; pd < max_pds; pd++)
3866 if (be32_eq(first->phys->entries[pd].refnum,
3867 dl2->disk.refnum))
3868 break;
3869 if (pd == max_pds) {
3870 dprintf("%s: no match for disk %08x\n", __func__,
3871 be32_to_cpu(dl2->disk.refnum));
3872 return 3;
3873 }
3874 }
3875
3876 max_vds = be16_to_cpu(first->active->max_vd_entries);
3877 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3878 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3879 continue;
3880 for (vd = 0; vd < max_vds; vd++)
3881 if (!memcmp(first->virt->entries[vd].guid,
3882 vl2->conf.guid, DDF_GUID_LEN))
3883 break;
3884 if (vd == max_vds) {
3885 dprintf("%s: no match for VD config\n", __func__);
3886 return 3;
3887 }
3888 }
3889 /* FIXME should I look at anything else? */
3890
3891 /*
3892 At this point we are fairly sure that the meta data matches.
3893 But the new disk may contain additional local data.
3894 Add it to the super block.
3895 */
3896 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3897 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3898 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3899 DDF_GUID_LEN))
3900 break;
3901 if (vl1) {
3902 if (vl1->other_bvds != NULL &&
3903 vl1->conf.sec_elmnt_seq !=
3904 vl2->conf.sec_elmnt_seq) {
3905 dprintf("%s: adding BVD %u\n", __func__,
3906 vl2->conf.sec_elmnt_seq);
3907 add_other_bvd(vl1, &vl2->conf,
3908 first->conf_rec_len*512);
3909 }
3910 continue;
3911 }
3912
3913 if (posix_memalign((void **)&vl1, 512,
3914 (first->conf_rec_len*512 +
3915 offsetof(struct vcl, conf))) != 0) {
3916 pr_err("%s could not allocate vcl buf\n",
3917 __func__);
3918 return 3;
3919 }
3920
3921 vl1->next = first->conflist;
3922 vl1->block_sizes = NULL;
3923 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3924 if (alloc_other_bvds(first, vl1) != 0) {
3925 pr_err("%s could not allocate other bvds\n",
3926 __func__);
3927 free(vl1);
3928 return 3;
3929 }
3930 for (vd = 0; vd < max_vds; vd++)
3931 if (!memcmp(first->virt->entries[vd].guid,
3932 vl1->conf.guid, DDF_GUID_LEN))
3933 break;
3934 vl1->vcnum = vd;
3935 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3936 first->conflist = vl1;
3937 }
3938
3939 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3940 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3941 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3942 break;
3943 if (dl1)
3944 continue;
3945
3946 if (posix_memalign((void **)&dl1, 512,
3947 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3948 != 0) {
3949 pr_err("%s could not allocate disk info buffer\n",
3950 __func__);
3951 return 3;
3952 }
3953 memcpy(dl1, dl2, sizeof(*dl1));
3954 dl1->mdupdate = NULL;
3955 dl1->next = first->dlist;
3956 dl1->fd = -1;
3957 for (pd = 0; pd < max_pds; pd++)
3958 if (be32_eq(first->phys->entries[pd].refnum,
3959 dl1->disk.refnum))
3960 break;
3961 dl1->pdnum = pd;
3962 if (dl2->spare) {
3963 if (posix_memalign((void **)&dl1->spare, 512,
3964 first->conf_rec_len*512) != 0) {
3965 pr_err("%s could not allocate spare info buf\n",
3966 __func__);
3967 return 3;
3968 }
3969 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3970 }
3971 for (vd = 0 ; vd < first->max_part ; vd++) {
3972 if (!dl2->vlist[vd]) {
3973 dl1->vlist[vd] = NULL;
3974 continue;
3975 }
3976 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3977 if (!memcmp(vl1->conf.guid,
3978 dl2->vlist[vd]->conf.guid,
3979 DDF_GUID_LEN))
3980 break;
3981 dl1->vlist[vd] = vl1;
3982 }
3983 }
3984 first->dlist = dl1;
3985 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3986 be32_to_cpu(dl1->disk.refnum));
3987 }
3988
3989 return 0;
3990 }
3991
3992 #ifndef MDASSEMBLE
3993 /*
3994 * A new array 'a' has been started which claims to be instance 'inst'
3995 * within container 'c'.
3996 * We need to confirm that the array matches the metadata in 'c' so
3997 * that we don't corrupt any metadata.
3998 */
3999 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4000 {
4001 struct ddf_super *ddf = c->sb;
4002 int n = atoi(inst);
4003 if (all_ff(ddf->virt->entries[n].guid)) {
4004 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4005 return -ENODEV;
4006 }
4007 dprintf("ddf: open_new %d\n", n);
4008 a->info.container_member = n;
4009 return 0;
4010 }
4011
4012 /*
4013 * The array 'a' is to be marked clean in the metadata.
4014 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4015 * clean up to the point (in sectors). If that cannot be recorded in the
4016 * metadata, then leave it as dirty.
4017 *
4018 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4019 * !global! virtual_disk.virtual_entry structure.
4020 */
4021 static int ddf_set_array_state(struct active_array *a, int consistent)
4022 {
4023 struct ddf_super *ddf = a->container->sb;
4024 int inst = a->info.container_member;
4025 int old = ddf->virt->entries[inst].state;
4026 if (consistent == 2) {
4027 /* Should check if a recovery should be started FIXME */
4028 consistent = 1;
4029 if (!is_resync_complete(&a->info))
4030 consistent = 0;
4031 }
4032 if (consistent)
4033 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4034 else
4035 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4036 if (old != ddf->virt->entries[inst].state)
4037 ddf_set_updates_pending(ddf);
4038
4039 old = ddf->virt->entries[inst].init_state;
4040 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4041 if (is_resync_complete(&a->info))
4042 ddf->virt->entries[inst].init_state |= DDF_init_full;
4043 else if (a->info.resync_start == 0)
4044 ddf->virt->entries[inst].init_state |= DDF_init_not;
4045 else
4046 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4047 if (old != ddf->virt->entries[inst].init_state)
4048 ddf_set_updates_pending(ddf);
4049
4050 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4051 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4052 consistent?"clean":"dirty",
4053 a->info.resync_start);
4054 return consistent;
4055 }
4056
4057 static int get_bvd_state(const struct ddf_super *ddf,
4058 const struct vd_config *vc)
4059 {
4060 unsigned int i, n_bvd, working = 0;
4061 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4062 int pd, st, state;
4063 for (i = 0; i < n_prim; i++) {
4064 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4065 continue;
4066 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4067 if (pd < 0)
4068 continue;
4069 st = be16_to_cpu(ddf->phys->entries[pd].state);
4070 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4071 == DDF_Online)
4072 working++;
4073 }
4074
4075 state = DDF_state_degraded;
4076 if (working == n_prim)
4077 state = DDF_state_optimal;
4078 else
4079 switch (vc->prl) {
4080 case DDF_RAID0:
4081 case DDF_CONCAT:
4082 case DDF_JBOD:
4083 state = DDF_state_failed;
4084 break;
4085 case DDF_RAID1:
4086 if (working == 0)
4087 state = DDF_state_failed;
4088 else if (working >= 2)
4089 state = DDF_state_part_optimal;
4090 break;
4091 case DDF_RAID4:
4092 case DDF_RAID5:
4093 if (working < n_prim - 1)
4094 state = DDF_state_failed;
4095 break;
4096 case DDF_RAID6:
4097 if (working < n_prim - 2)
4098 state = DDF_state_failed;
4099 else if (working == n_prim - 1)
4100 state = DDF_state_part_optimal;
4101 break;
4102 }
4103 return state;
4104 }
4105
4106 static int secondary_state(int state, int other, int seclevel)
4107 {
4108 if (state == DDF_state_optimal && other == DDF_state_optimal)
4109 return DDF_state_optimal;
4110 if (seclevel == DDF_2MIRRORED) {
4111 if (state == DDF_state_optimal || other == DDF_state_optimal)
4112 return DDF_state_part_optimal;
4113 if (state == DDF_state_failed && other == DDF_state_failed)
4114 return DDF_state_failed;
4115 return DDF_state_degraded;
4116 } else {
4117 if (state == DDF_state_failed || other == DDF_state_failed)
4118 return DDF_state_failed;
4119 if (state == DDF_state_degraded || other == DDF_state_degraded)
4120 return DDF_state_degraded;
4121 return DDF_state_part_optimal;
4122 }
4123 }
4124
4125 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4126 {
4127 int state = get_bvd_state(ddf, &vcl->conf);
4128 unsigned int i;
4129 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4130 state = secondary_state(
4131 state,
4132 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4133 vcl->conf.srl);
4134 }
4135 return state;
4136 }
4137
4138 /*
4139 * The state of each disk is stored in the global phys_disk structure
4140 * in phys_disk.entries[n].state.
4141 * This makes various combinations awkward.
4142 * - When a device fails in any array, it must be failed in all arrays
4143 * that include a part of this device.
4144 * - When a component is rebuilding, we cannot include it officially in the
4145 * array unless this is the only array that uses the device.
4146 *
4147 * So: when transitioning:
4148 * Online -> failed, just set failed flag. monitor will propagate
4149 * spare -> online, the device might need to be added to the array.
4150 * spare -> failed, just set failed. Don't worry if in array or not.
4151 */
/* Update the metadata state of member 'n' of array 'a' to 'state'
 * (a DS_* bitmask), then recompute and store the array's overall
 * virtual-disk state.
 */
static void ddf_set_disk(struct active_array *a, int n, int state)
{
	struct ddf_super *ddf = a->container->sb;
	unsigned int inst = a->info.container_member, n_bvd;
	struct vcl *vcl;
	struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
					 &n_bvd, &vcl);
	int pd;
	struct mdinfo *mdi;
	struct dl *dl;

	dprintf("%s: %d to %x\n", __func__, n, state);
	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* Find the matching slot in 'info'. */
	for (mdi = a->info.devs; mdi; mdi = mdi->next)
		if (mdi->disk.raid_disk == n)
			break;
	if (!mdi) {
		pr_err("%s: cannot find raid disk %d\n",
		       __func__, n);
		return;
	}

	/* and find the 'dl' entry corresponding to that. */
	for (dl = ddf->dlist; dl; dl = dl->next)
		if (mdi->state_fd >= 0 &&
		    mdi->disk.major == dl->major &&
		    mdi->disk.minor == dl->minor)
			break;
	if (!dl) {
		pr_err("%s: cannot find raid disk %d (%d/%d)\n",
		       __func__, n,
		       mdi->disk.major, mdi->disk.minor);
		return;
	}

	pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
	if (pd < 0 || pd != dl->pdnum) {
		/* disk doesn't currently exist or has changed.
		 * If it is now in_sync, insert it. */
		dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
			__func__, dl->pdnum, dl->major, dl->minor,
			be32_to_cpu(dl->disk.refnum));
		dprintf("%s: array %u disk %u ref %08x pd %d\n",
			__func__, inst, n_bvd,
			be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
		if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
			/* Record the disk in the VD config and flip its
			 * PD type from global spare to active member. */
			pd = dl->pdnum; /* FIXME: is this really correct ? */
			vc->phys_refnum[n_bvd] = dl->disk.refnum;
			LBA_OFFSET(ddf, vc)[n_bvd] =
				cpu_to_be64(mdi->data_offset);
			be16_clear(ddf->phys->entries[pd].type,
				   cpu_to_be16(DDF_Global_Spare));
			be16_set(ddf->phys->entries[pd].type,
				 cpu_to_be16(DDF_Active_in_VD));
			ddf_set_updates_pending(ddf);
		}
	} else {
		/* Disk already known: just adjust its PD state flags */
		be16 old = ddf->phys->entries[pd].state;
		if (state & DS_FAULTY)
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed));
		if (state & DS_INSYNC) {
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Online));
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Rebuilding));
		}
		if (!be16_eq(old, ddf->phys->entries[pd].state))
			ddf_set_updates_pending(ddf);
	}

	dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
		be32_to_cpu(dl->disk.refnum), state,
		be16_to_cpu(ddf->phys->entries[pd].state));

	/* Now we need to check the state of the array and update
	 * virtual_disk.entries[n].state.
	 * It needs to be one of "optimal", "degraded", "failed".
	 * I don't understand 'deleted' or 'missing'.
	 */
	state = get_svd_state(ddf, vcl);

	if (ddf->virt->entries[inst].state !=
	    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
	     | state)) {

		ddf->virt->entries[inst].state =
			(ddf->virt->entries[inst].state & ~DDF_state_mask)
			| state;
		ddf_set_updates_pending(ddf);
	}

}
4249
4250 static void ddf_sync_metadata(struct supertype *st)
4251 {
4252
4253 /*
4254 * Write all data to all devices.
4255 * Later, we might be able to track whether only local changes
4256 * have been made, or whether any global data has been changed,
4257 * but ddf is sufficiently weird that it probably always
4258 * changes global data ....
4259 */
4260 struct ddf_super *ddf = st->sb;
4261 if (!ddf->updates_pending)
4262 return;
4263 ddf->updates_pending = 0;
4264 __write_init_super_ddf(st, 1);
4265 dprintf("ddf: sync_metadata\n");
4266 }
4267
4268 static int del_from_conflist(struct vcl **list, const char *guid)
4269 {
4270 struct vcl **p;
4271 int found = 0;
4272 for (p = list; p && *p; p = &((*p)->next))
4273 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4274 found = 1;
4275 *p = (*p)->next;
4276 }
4277 return found;
4278 }
4279
4280 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4281 {
4282 struct dl *dl;
4283 unsigned int vdnum, i;
4284 vdnum = find_vde_by_guid(ddf, guid);
4285 if (vdnum == DDF_NOTFOUND) {
4286 pr_err("%s: could not find VD %s\n", __func__,
4287 guid_str(guid));
4288 return -1;
4289 }
4290 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4291 pr_err("%s: could not find conf %s\n", __func__,
4292 guid_str(guid));
4293 return -1;
4294 }
4295 for (dl = ddf->dlist; dl; dl = dl->next)
4296 for (i = 0; i < ddf->max_part; i++)
4297 if (dl->vlist[i] != NULL &&
4298 !memcmp(dl->vlist[i]->conf.guid, guid,
4299 DDF_GUID_LEN))
4300 dl->vlist[i] = NULL;
4301 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4302 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4303 return 0;
4304 }
4305
4306 static int kill_subarray_ddf(struct supertype *st)
4307 {
4308 struct ddf_super *ddf = st->sb;
4309 /*
4310 * currentconf is set in container_content_ddf,
4311 * called with subarray arg
4312 */
4313 struct vcl *victim = ddf->currentconf;
4314 struct vd_config *conf;
4315 ddf->currentconf = NULL;
4316 unsigned int vdnum;
4317 if (!victim) {
4318 pr_err("%s: nothing to kill\n", __func__);
4319 return -1;
4320 }
4321 conf = &victim->conf;
4322 vdnum = find_vde_by_guid(ddf, conf->guid);
4323 if (vdnum == DDF_NOTFOUND) {
4324 pr_err("%s: could not find VD %s\n", __func__,
4325 guid_str(conf->guid));
4326 return -1;
4327 }
4328 if (st->update_tail) {
4329 struct virtual_disk *vd;
4330 int len = sizeof(struct virtual_disk)
4331 + sizeof(struct virtual_entry);
4332 vd = xmalloc(len);
4333 if (vd == NULL) {
4334 pr_err("%s: failed to allocate %d bytes\n", __func__,
4335 len);
4336 return -1;
4337 }
4338 memset(vd, 0 , len);
4339 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4340 vd->populated_vdes = cpu_to_be16(0);
4341 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4342 /* we use DDF_state_deleted as marker */
4343 vd->entries[0].state = DDF_state_deleted;
4344 append_metadata_update(st, vd, len);
4345 } else {
4346 _kill_subarray_ddf(ddf, conf->guid);
4347 ddf_set_updates_pending(ddf);
4348 ddf_sync_metadata(st);
4349 }
4350 return 0;
4351 }
4352
4353 static void copy_matching_bvd(struct ddf_super *ddf,
4354 struct vd_config *conf,
4355 const struct metadata_update *update)
4356 {
4357 unsigned int mppe =
4358 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4359 unsigned int len = ddf->conf_rec_len * 512;
4360 char *p;
4361 struct vd_config *vc;
4362 for (p = update->buf; p < update->buf + update->len; p += len) {
4363 vc = (struct vd_config *) p;
4364 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4365 memcpy(conf->phys_refnum, vc->phys_refnum,
4366 mppe * (sizeof(__u32) + sizeof(__u64)));
4367 return;
4368 }
4369 }
4370 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4371 conf->sec_elmnt_seq, guid_str(conf->guid));
4372 }
4373
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible update are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 *
	 * Runs in the mdmon monitor thread; any allocations the update
	 * needs must already have been done in ddf_prepare_update()
	 * (see update->space usage below).
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
		/* Physical-disk update: exactly one entry, either
		 * removing a disk (DDF_Missing set) or adding one. */

		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		/* used_pdes carries the target entry index, not a count. */
		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					/* Hand the dl back via update->space
					 * so managemon frees it safely. */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* An all-0xff GUID means the slot is free; refuse to
		 * overwrite an occupied entry. */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
		/* Virtual-disk update: either delete a VD (marked with
		 * DDF_state_deleted by kill_subarray_ddf) or add one. */

		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		if (vd->entries[0].state == DDF_state_deleted) {
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {

			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		/* VD configuration update: the buffer holds one
		 * vd_config record (conf_rec_len sectors) per BVD. */
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			/* Secondary BVDs (1..sec_elmnt_count-1) live in
			 * other_bvds[]; update each from the buffer. */
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF */
			unsigned int i;
			/* update->space (a pre-allocated vcl) was set up by
			 * ddf_prepare_update(); without it we cannot store
			 * the new conf here (no malloc in the monitor). */
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			/* NULL-terminate/clear the remainder of vlist. */
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			if (dl->vlist[0]) {
				/* Disk is used by at least one VD: it is no
				 * longer a global spare, and if it just became
				 * active in a degraded VD it must rebuild. */
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				/* Has a spare-assignment record: dedicated
				 * spare, not a global one. */
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				/* Neither in a VD nor assigned: plain
				 * global spare. */
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++) {
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries down to index pd2,
			 * renumbering any dl that pointed at the old slot. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the now-unused tail entries free (all-0xff GUID). */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4676
4677 static void ddf_prepare_update(struct supertype *st,
4678 struct metadata_update *update)
4679 {
4680 /* This update arrived at managemon.
4681 * We are about to pass it to monitor.
4682 * If a malloc is needed, do it here.
4683 */
4684 struct ddf_super *ddf = st->sb;
4685 be32 *magic = (be32 *)update->buf;
4686 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4687 struct vcl *vcl;
4688 struct vd_config *conf = (struct vd_config *) update->buf;
4689 if (posix_memalign(&update->space, 512,
4690 offsetof(struct vcl, conf)
4691 + ddf->conf_rec_len * 512) != 0) {
4692 update->space = NULL;
4693 return;
4694 }
4695 vcl = update->space;
4696 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4697 if (alloc_other_bvds(ddf, vcl) != 0) {
4698 free(update->space);
4699 update->space = NULL;
4700 }
4701 }
4702 }
4703
4704 /*
4705 * Check degraded state of a RAID10.
4706 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4707 */
4708 static int raid10_degraded(struct mdinfo *info)
4709 {
4710 int n_prim, n_bvds;
4711 int i;
4712 struct mdinfo *d;
4713 char *found;
4714 int ret = -1;
4715
4716 n_prim = info->array.layout & ~0x100;
4717 n_bvds = info->array.raid_disks / n_prim;
4718 found = xmalloc(n_bvds);
4719 if (found == NULL)
4720 return ret;
4721 memset(found, 0, n_bvds);
4722 for (d = info->devs; d; d = d->next) {
4723 i = d->disk.raid_disk / n_prim;
4724 if (i >= n_bvds) {
4725 pr_err("%s: BUG: invalid raid disk\n", __func__);
4726 goto out;
4727 }
4728 if (d->state_fd > 0)
4729 found[i]++;
4730 }
4731 ret = 2;
4732 for (i = 0; i < n_bvds; i++)
4733 if (!found[i]) {
4734 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4735 ret = 0;
4736 goto out;
4737 } else if (found[i] < n_prim) {
4738 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4739 n_bvds);
4740 ret = 1;
4741 }
4742 out:
4743 free(found);
4744 return ret;
4745 }
4746
4747 /*
4748 * Check if the array 'a' is degraded but not failed.
4749 * If it is, find as many spares as are available and needed and
4750 * arrange for their inclusion.
4751 * We only choose devices which are not already in the array,
4752 * and prefer those with a spare-assignment to this array.
4753 * otherwise we choose global spares - assuming always that
4754 * there is enough room.
4755 * For each spare that we assign, we return an 'mdinfo' which
4756 * describes the position for the device in the array.
4757 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4758 * the new phys_refnum and lba_offset values.
4759 *
4760 * Only worry about BVDs at the moment.
4761 */
4762 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4763 struct metadata_update **updates)
4764 {
4765 int working = 0;
4766 struct mdinfo *d;
4767 struct ddf_super *ddf = a->container->sb;
4768 int global_ok = 0;
4769 struct mdinfo *rv = NULL;
4770 struct mdinfo *di;
4771 struct metadata_update *mu;
4772 struct dl *dl;
4773 int i;
4774 unsigned int j;
4775 struct vcl *vcl;
4776 struct vd_config *vc;
4777 unsigned int n_bvd;
4778
4779 for (d = a->info.devs ; d ; d = d->next) {
4780 if ((d->curr_state & DS_FAULTY) &&
4781 d->state_fd >= 0)
4782 /* wait for Removal to happen */
4783 return NULL;
4784 if (d->state_fd >= 0)
4785 working ++;
4786 }
4787
4788 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4789 a->info.array.raid_disks,
4790 a->info.array.level);
4791 if (working == a->info.array.raid_disks)
4792 return NULL; /* array not degraded */
4793 switch (a->info.array.level) {
4794 case 1:
4795 if (working == 0)
4796 return NULL; /* failed */
4797 break;
4798 case 4:
4799 case 5:
4800 if (working < a->info.array.raid_disks - 1)
4801 return NULL; /* failed */
4802 break;
4803 case 6:
4804 if (working < a->info.array.raid_disks - 2)
4805 return NULL; /* failed */
4806 break;
4807 case 10:
4808 if (raid10_degraded(&a->info) < 1)
4809 return NULL;
4810 break;
4811 default: /* concat or stripe */
4812 return NULL; /* failed */
4813 }
4814
4815 /* For each slot, if it is not working, find a spare */
4816 dl = ddf->dlist;
4817 for (i = 0; i < a->info.array.raid_disks; i++) {
4818 for (d = a->info.devs ; d ; d = d->next)
4819 if (d->disk.raid_disk == i)
4820 break;
4821 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4822 if (d && (d->state_fd >= 0))
4823 continue;
4824
4825 /* OK, this device needs recovery. Find a spare */
4826 again:
4827 for ( ; dl ; dl = dl->next) {
4828 unsigned long long esize;
4829 unsigned long long pos;
4830 struct mdinfo *d2;
4831 int is_global = 0;
4832 int is_dedicated = 0;
4833 struct extent *ex;
4834 unsigned int j;
4835 be16 state = ddf->phys->entries[dl->pdnum].state;
4836 if (be16_and(state,
4837 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4838 !be16_and(state,
4839 cpu_to_be16(DDF_Online)))
4840 continue;
4841
4842 /* If in this array, skip */
4843 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4844 if (d2->state_fd >= 0 &&
4845 d2->disk.major == dl->major &&
4846 d2->disk.minor == dl->minor) {
4847 dprintf("%x:%x (%08x) already in array\n",
4848 dl->major, dl->minor,
4849 be32_to_cpu(dl->disk.refnum));
4850 break;
4851 }
4852 if (d2)
4853 continue;
4854 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4855 cpu_to_be16(DDF_Spare))) {
4856 /* Check spare assign record */
4857 if (dl->spare) {
4858 if (dl->spare->type & DDF_spare_dedicated) {
4859 /* check spare_ents for guid */
4860 for (j = 0 ;
4861 j < be16_to_cpu
4862 (dl->spare
4863 ->populated);
4864 j++) {
4865 if (memcmp(dl->spare->spare_ents[j].guid,
4866 ddf->virt->entries[a->info.container_member].guid,
4867 DDF_GUID_LEN) == 0)
4868 is_dedicated = 1;
4869 }
4870 } else
4871 is_global = 1;
4872 }
4873 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4874 cpu_to_be16(DDF_Global_Spare))) {
4875 is_global = 1;
4876 } else if (!be16_and(ddf->phys
4877 ->entries[dl->pdnum].state,
4878 cpu_to_be16(DDF_Failed))) {
4879 /* we can possibly use some of this */
4880 is_global = 1;
4881 }
4882 if ( ! (is_dedicated ||
4883 (is_global && global_ok))) {
4884 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4885 is_dedicated, is_global);
4886 continue;
4887 }
4888
4889 /* We are allowed to use this device - is there space?
4890 * We need a->info.component_size sectors */
4891 ex = get_extents(ddf, dl);
4892 if (!ex) {
4893 dprintf("cannot get extents\n");
4894 continue;
4895 }
4896 j = 0; pos = 0;
4897 esize = 0;
4898
4899 do {
4900 esize = ex[j].start - pos;
4901 if (esize >= a->info.component_size)
4902 break;
4903 pos = ex[j].start + ex[j].size;
4904 j++;
4905 } while (ex[j-1].size);
4906
4907 free(ex);
4908 if (esize < a->info.component_size) {
4909 dprintf("%x:%x has no room: %llu %llu\n",
4910 dl->major, dl->minor,
4911 esize, a->info.component_size);
4912 /* No room */
4913 continue;
4914 }
4915
4916 /* Cool, we have a device with some space at pos */
4917 di = xcalloc(1, sizeof(*di));
4918 di->disk.number = i;
4919 di->disk.raid_disk = i;
4920 di->disk.major = dl->major;
4921 di->disk.minor = dl->minor;
4922 di->disk.state = 0;
4923 di->recovery_start = 0;
4924 di->data_offset = pos;
4925 di->component_size = a->info.component_size;
4926 di->container_member = dl->pdnum;
4927 di->next = rv;
4928 rv = di;
4929 dprintf("%x:%x (%08x) to be %d at %llu\n",
4930 dl->major, dl->minor,
4931 be32_to_cpu(dl->disk.refnum), i, pos);
4932
4933 break;
4934 }
4935 if (!dl && ! global_ok) {
4936 /* not enough dedicated spares, try global */
4937 global_ok = 1;
4938 dl = ddf->dlist;
4939 goto again;
4940 }
4941 }
4942
4943 if (!rv)
4944 /* No spares found */
4945 return rv;
4946 /* Now 'rv' has a list of devices to return.
4947 * Create a metadata_update record to update the
4948 * phys_refnum and lba_offset values
4949 */
4950 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4951 &n_bvd, &vcl);
4952 if (vc == NULL)
4953 return NULL;
4954
4955 mu = xmalloc(sizeof(*mu));
4956 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
4957 free(mu);
4958 mu = NULL;
4959 }
4960
4961 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
4962 mu->buf = xmalloc(mu->len);
4963 mu->space = NULL;
4964 mu->space_list = NULL;
4965 mu->next = *updates;
4966 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
4967 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
4968 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
4969 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
4970
4971 vc = (struct vd_config*)mu->buf;
4972 for (di = rv ; di ; di = di->next) {
4973 unsigned int i_sec, i_prim;
4974 i_sec = di->disk.raid_disk
4975 / be16_to_cpu(vcl->conf.prim_elmnt_count);
4976 i_prim = di->disk.raid_disk
4977 % be16_to_cpu(vcl->conf.prim_elmnt_count);
4978 vc = (struct vd_config *)(mu->buf
4979 + i_sec * ddf->conf_rec_len * 512);
4980 for (dl = ddf->dlist; dl; dl = dl->next)
4981 if (dl->major == di->disk.major
4982 && dl->minor == di->disk.minor)
4983 break;
4984 if (!dl) {
4985 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
4986 __func__, di->disk.raid_disk,
4987 di->disk.major, di->disk.minor);
4988 return NULL;
4989 }
4990 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
4991 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
4992 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
4993 be32_to_cpu(vc->phys_refnum[i_prim]),
4994 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
4995 }
4996 *updates = mu;
4997 return rv;
4998 }
4999 #endif /* MDASSEMBLE */
5000
5001 static int ddf_level_to_layout(int level)
5002 {
5003 switch(level) {
5004 case 0:
5005 case 1:
5006 return 0;
5007 case 5:
5008 return ALGORITHM_LEFT_SYMMETRIC;
5009 case 6:
5010 return ALGORITHM_ROTATING_N_CONTINUE;
5011 case 10:
5012 return 0x102;
5013 default:
5014 return UnSet;
5015 }
5016 }
5017
5018 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5019 {
5020 if (level && *level == UnSet)
5021 *level = LEVEL_CONTAINER;
5022
5023 if (level && layout && *layout == UnSet)
5024 *layout = ddf_level_to_layout(*level);
5025 }
5026
/* Method table registering the DDF metadata format with mdadm/mdmon.
 * Entries guarded by MDASSEMBLE are only needed by the full tools, not
 * by the stripped-down assembly-only build.
 */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* examine/detail/create-time operations used by the mdadm tool */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super	= export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	/* operations available in every build */
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* DDF arrays are always containers of subarrays */
	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new       = ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk       = ddf_set_disk,
	.sync_metadata  = ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};