1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so to convert to Linux (1970 based)
38 * timestamps we need to add the number of seconds in the 1970s:
39 * 10 years including 2 leap days.
40 */
41 #define DECADE (3600*24*(365*10+2))
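/* Illustrative sketch (editorial addition, not from the original source):
 * how DECADE is typically applied.  On-disk DDF timestamps are 32-bit,
 * big-endian and 1980-based, so converting to and from Linux time looks
 * like:
 *
 *	__u32 ddf_stamp   = __cpu_to_be32(time(NULL) - DECADE);
 *	time_t linux_time = (time_t)__be32_to_cpu(ddf_stamp) + DECADE;
 *
 * This is the same pattern used by make_header_guid() and
 * getinfo_super_ddf() later in this file.
 */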
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 /* The DDF metadata handling.
48 * DDF metadata lives at the end of the device.
49 * The last 512 byte block provides an 'anchor' which is used to locate
50 * the rest of the metadata which usually lives immediately behind the anchor.
51 *
52 * Note:
53 * - all multibyte numeric fields are bigendian.
54 * - all strings are space padded.
55 *
56 */
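/* Sketch (editorial addition): locating the anchor described above.
 * The anchor is the last 512-byte block of the device, so a reader seeks
 * to (device_size - 512), reads one block, and then follows primary_lba /
 * secondary_lba from it - compare load_ddf_headers() below, which does
 * essentially this:
 *
 *	unsigned long long dsize;
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &super->anchor, 512);
 */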
57
58 /* Primary Raid Level (PRL) */
59 #define DDF_RAID0 0x00
60 #define DDF_RAID1 0x01
61 #define DDF_RAID3 0x03
62 #define DDF_RAID4 0x04
63 #define DDF_RAID5 0x05
64 #define DDF_RAID1E 0x11
65 #define DDF_JBOD 0x0f
66 #define DDF_CONCAT 0x1f
67 #define DDF_RAID5E 0x15
68 #define DDF_RAID5EE 0x25
69 #define DDF_RAID6 0x06
70
71 /* Raid Level Qualifier (RLQ) */
72 #define DDF_RAID0_SIMPLE 0x00
73 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
74 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
75 #define DDF_RAID3_0 0x00 /* parity in first extent */
76 #define DDF_RAID3_N 0x01 /* parity in last extent */
77 #define DDF_RAID4_0 0x00 /* parity in first extent */
78 #define DDF_RAID4_N 0x01 /* parity in last extent */
79 /* these apply to raid5e and raid5ee as well */
80 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
81 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
82 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
83 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
84
85 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
86 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
87
88 /* Secondary RAID Level (SRL) */
89 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
90 #define DDF_2MIRRORED 0x01
91 #define DDF_2CONCAT 0x02
92 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
93
94 /* Magic numbers */
95 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
96 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
97 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
98 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
99 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
100 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
101 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
102 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
103 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
104 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
105
106 #define DDF_GUID_LEN 24
107 #define DDF_REVISION_0 "01.00.00"
108 #define DDF_REVISION_2 "01.02.00"
109
110 struct ddf_header {
111 __u32 magic; /* DDF_HEADER_MAGIC */
112 __u32 crc;
113 char guid[DDF_GUID_LEN];
114 char revision[8]; /* 01.02.00 */
115 __u32 seq; /* starts at '1' */
116 __u32 timestamp;
117 __u8 openflag;
118 __u8 foreignflag;
119 __u8 enforcegroups;
120 __u8 pad0; /* 0xff */
121 __u8 pad1[12]; /* 12 * 0xff */
122 /* 64 bytes so far */
123 __u8 header_ext[32]; /* reserved: fill with 0xff */
124 __u64 primary_lba;
125 __u64 secondary_lba;
126 __u8 type;
127 __u8 pad2[3]; /* 0xff */
128 __u32 workspace_len; /* sectors for vendor space -
129 * at least 32768 sectors */
130 __u64 workspace_lba;
131 __u16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
132 __u16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
133 __u16 max_partitions; /* i.e. max num of configuration
134 record entries per disk */
135 __u16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
136 *12/512) */
137 __u16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
138 __u8 pad3[54]; /* 0xff */
139 /* 192 bytes so far */
140 __u32 controller_section_offset;
141 __u32 controller_section_length;
142 __u32 phys_section_offset;
143 __u32 phys_section_length;
144 __u32 virt_section_offset;
145 __u32 virt_section_length;
146 __u32 config_section_offset;
147 __u32 config_section_length;
148 __u32 data_section_offset;
149 __u32 data_section_length;
150 __u32 bbm_section_offset;
151 __u32 bbm_section_length;
152 __u32 diag_space_offset;
153 __u32 diag_space_length;
154 __u32 vendor_offset;
155 __u32 vendor_length;
156 /* 256 bytes so far */
157 __u8 pad4[256]; /* 0xff */
158 };
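/* Worked example (editorial addition; the numbers are just an
 * illustration): with max_primary_element_entries == 256, each
 * configuration record needs 12 bytes per element (a 4-byte phys_refnum
 * plus an 8-byte LBA offset), so
 *
 *	config_record_len = 1 + ROUNDUP(256 * 12 / 512) = 1 + 6 = 7 sectors
 *
 * which matches the conf_rec_len calculation in init_super_ddf() below.
 */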
159
160 /* type field */
161 #define DDF_HEADER_ANCHOR 0x00
162 #define DDF_HEADER_PRIMARY 0x01
163 #define DDF_HEADER_SECONDARY 0x02
164
165 /* The content of the 'controller section' - global scope */
166 struct ddf_controller_data {
167 __u32 magic; /* DDF_CONTROLLER_MAGIC */
168 __u32 crc;
169 char guid[DDF_GUID_LEN];
170 struct controller_type {
171 __u16 vendor_id;
172 __u16 device_id;
173 __u16 sub_vendor_id;
174 __u16 sub_device_id;
175 } type;
176 char product_id[16];
177 __u8 pad[8]; /* 0xff */
178 __u8 vendor_data[448];
179 };
180
181 /* The content of phys_section - global scope */
182 struct phys_disk {
183 __u32 magic; /* DDF_PHYS_RECORDS_MAGIC */
184 __u32 crc;
185 __u16 used_pdes;
186 __u16 max_pdes;
187 __u8 pad[52];
188 struct phys_disk_entry {
189 char guid[DDF_GUID_LEN];
190 __u32 refnum;
191 __u16 type;
192 __u16 state;
193 __u64 config_size; /* DDF structures must be after here */
194 char path[18]; /* another horrible structure really */
195 __u8 pad[6];
196 } entries[0];
197 };
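/* Editorial note: 'entries[0]' is a flexible-array tail, so the size of
 * this section for n entries is
 *
 *	sizeof(struct phys_disk) + n * sizeof(struct phys_disk_entry)
 *
 * rounded up to whole 512-byte sectors - see the pdsize calculation in
 * init_super_ddf() below.
 */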
198
199 /* phys_disk_entry.type is a bitmap - bigendian remember */
200 #define DDF_Forced_PD_GUID 1
201 #define DDF_Active_in_VD 2
202 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
203 #define DDF_Spare 8 /* overrides Global_spare */
204 #define DDF_Foreign 16
205 #define DDF_Legacy 32 /* no DDF on this device */
206
207 #define DDF_Interface_mask 0xf00
208 #define DDF_Interface_SCSI 0x100
209 #define DDF_Interface_SAS 0x200
210 #define DDF_Interface_SATA 0x300
211 #define DDF_Interface_FC 0x400
212
213 /* phys_disk_entry.state is a bigendian bitmap */
214 #define DDF_Online 1
215 #define DDF_Failed 2 /* overrides 1,4,8 */
216 #define DDF_Rebuilding 4
217 #define DDF_Transition 8
218 #define DDF_SMART 16
219 #define DDF_ReadErrors 32
220 #define DDF_Missing 64
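/* Sketch (editorial addition): these are bits in a big-endian 16-bit
 * field, so tests must byte-swap first, e.g.
 *
 *	int state = __be16_to_cpu(pd->state);
 *	if (state & DDF_Failed)
 *		state &= ~(DDF_Online | DDF_Rebuilding | DDF_Transition);
 *
 * which is the pattern examine_pds() uses below.
 */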
221
222 /* The content of the virt_section global scope */
223 struct virtual_disk {
224 __u32 magic; /* DDF_VIRT_RECORDS_MAGIC */
225 __u32 crc;
226 __u16 populated_vdes;
227 __u16 max_vdes;
228 __u8 pad[52];
229 struct virtual_entry {
230 char guid[DDF_GUID_LEN];
231 __u16 unit;
232 __u16 pad0; /* 0xffff */
233 __u16 guid_crc;
234 __u16 type;
235 __u8 state;
236 __u8 init_state;
237 __u8 pad1[14];
238 char name[16];
239 } entries[0];
240 };
241
242 /* virtual_entry.type is a bitmap - bigendian */
243 #define DDF_Shared 1
244 #define DDF_Enforce_Groups 2
245 #define DDF_Unicode 4
246 #define DDF_Owner_Valid 8
247
248 /* virtual_entry.state is a bigendian bitmap */
249 #define DDF_state_mask 0x7
250 #define DDF_state_optimal 0x0
251 #define DDF_state_degraded 0x1
252 #define DDF_state_deleted 0x2
253 #define DDF_state_missing 0x3
254 #define DDF_state_failed 0x4
255 #define DDF_state_part_optimal 0x5
256
257 #define DDF_state_morphing 0x8
258 #define DDF_state_inconsistent 0x10
259
260 /* virtual_entry.init_state is a bigendian bitmap */
261 #define DDF_initstate_mask 0x03
262 #define DDF_init_not 0x00
263 #define DDF_init_quick 0x01 /* initialisation in progress.
264 * i.e. 'state_inconsistent' */
265 #define DDF_init_full 0x02
266
267 #define DDF_access_mask 0xc0
268 #define DDF_access_rw 0x00
269 #define DDF_access_ro 0x80
270 #define DDF_access_blocked 0xc0
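/* Sketch (editorial addition): init_state and the access bits share one
 * byte, so both are extracted from virtual_entry.init_state with the
 * masks above, e.g.
 *
 *	int init_st = ve->init_state & DDF_initstate_mask;
 *	int access  = ve->init_state & DDF_access_mask;
 *
 * 'access' can then be compared against DDF_access_rw, DDF_access_ro or
 * DDF_access_blocked; getinfo_super_ddf_bvd() below tests
 * DDF_initstate_mask the same way.
 */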
271
272 /* The content of the config_section - local scope
273 * It has multiple records each config_record_len sectors
274 * They can be vd_config or spare_assign
275 */
276
277 struct vd_config {
278 __u32 magic; /* DDF_VD_CONF_MAGIC */
279 __u32 crc;
280 char guid[DDF_GUID_LEN];
281 __u32 timestamp;
282 __u32 seqnum;
283 __u8 pad0[24];
284 __u16 prim_elmnt_count;
285 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
286 __u8 prl;
287 __u8 rlq;
288 __u8 sec_elmnt_count;
289 __u8 sec_elmnt_seq;
290 __u8 srl;
291 __u64 blocks; /* blocks per component could be different
292 * on different component devices...(only
293 * for concat I hope) */
294 __u64 array_blocks; /* blocks in array */
295 __u8 pad1[8];
296 __u32 spare_refs[8];
297 __u8 cache_pol[8];
298 __u8 bg_rate;
299 __u8 pad2[3];
300 __u8 pad3[52];
301 __u8 pad4[192];
302 __u8 v0[32]; /* reserved- 0xff */
303 __u8 v1[32]; /* reserved- 0xff */
304 __u8 v2[16]; /* reserved- 0xff */
305 __u8 v3[16]; /* reserved- 0xff */
306 __u8 vendor[32];
307 __u32 phys_refnum[0]; /* refnum of each disk in sequence */
308 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
309 bvd are always the same size */
310 };
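/* Sketch (editorial addition): phys_refnum[] has max_primary_element_entries
 * (mppe) 32-bit entries, so the LBA table that follows it is reached by
 * pointer arithmetic rather than a named field, e.g.
 *
 *	__u64 *lba_offset = (__u64 *)&vc->phys_refnum[mppe];
 *
 * which is how load_ddf_local() sets vcl->lba_offset below.
 */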
311
312 /* vd_config.cache_pol[7] is a bitmap */
313 #define DDF_cache_writeback 1 /* else writethrough */
314 #define DDF_cache_wadaptive 2 /* only applies if writeback */
315 #define DDF_cache_readahead 4
316 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
317 #define DDF_cache_ifnobatt 16 /* allow write caching even if the battery is poor */
318 #define DDF_cache_wallowed 32 /* enable write caching */
319 #define DDF_cache_rallowed 64 /* enable read caching */
320
321 struct spare_assign {
322 __u32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
323 __u32 crc;
324 __u32 timestamp;
325 __u8 reserved[7];
326 __u8 type;
327 __u16 populated; /* SAEs used */
328 __u16 max; /* max SAEs */
329 __u8 pad[8];
330 struct spare_assign_entry {
331 char guid[DDF_GUID_LEN];
332 __u16 secondary_element;
333 __u8 pad[6];
334 } spare_ents[0];
335 };
336 /* spare_assign.type is a bitmap */
337 #define DDF_spare_dedicated 0x1 /* else global */
338 #define DDF_spare_revertible 0x2 /* else committable */
339 #define DDF_spare_active 0x4 /* else not active */
340 #define DDF_spare_affinity 0x8 /* enclosure affinity */
341
342 /* The data_section contents - local scope */
343 struct disk_data {
344 __u32 magic; /* DDF_PHYS_DATA_MAGIC */
345 __u32 crc;
346 char guid[DDF_GUID_LEN];
347 __u32 refnum; /* crc of some magic drive data ... */
348 __u8 forced_ref; /* set when above was not result of magic */
349 __u8 forced_guid; /* set if guid was forced rather than magic */
350 __u8 vendor[32];
351 __u8 pad[442];
352 };
353
354 /* bbm_section content */
355 struct bad_block_log {
356 __u32 magic;
357 __u32 crc;
358 __u16 entry_count;
359 __u32 spare_count;
360 __u8 pad[10];
361 __u64 first_spare;
362 struct mapped_block {
363 __u64 defective_start;
364 __u32 replacement_start;
365 __u16 remap_count;
366 __u8 pad[2];
367 } entries[0];
368 };
369
370 /* Struct for internally holding ddf structures */
371 /* The DDF structure stored on each device is potentially
372 * quite different, as some data is global and some is local.
373 * The global data is:
374 * - ddf header
375 * - controller_data
376 * - Physical disk records
377 * - Virtual disk records
378 * The local data is:
379 * - Configuration records
380 * - Physical Disk data section
381 * ( and Bad block and vendor which I don't care about yet).
382 *
383 * The local data is parsed into separate lists as it is read
384 * and reconstructed for writing. This means that we only need
385 * to make config changes once and they are automatically
386 * propagated to all devices.
387 * Note that the ddf_super has space for the conf and disk data
388 * for this disk and also for a list of all such data.
389 * The list is only used for the superblock that is being
390 * built in Create or Assemble to describe the whole array.
391 */
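/* Sketch (editorial addition): the relationships described above, in use.
 * Each 'struct dl' on ->dlist carries a vlist[] of pointers into the
 * shared ->conflist, so counting the configuration records referenced by
 * each disk looks roughly like:
 *
 *	struct dl *dl;
 *	unsigned int i, n = 0;
 *	for (dl = ddf->dlist; dl; dl = dl->next)
 *		for (i = 0; i < ddf->max_part; i++)
 *			if (dl->vlist[i])
 *				n++;
 */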
392 struct ddf_super {
393 struct ddf_header anchor, primary, secondary;
394 struct ddf_controller_data controller;
395 struct ddf_header *active;
396 struct phys_disk *phys;
397 struct virtual_disk *virt;
398 int pdsize, vdsize;
399 unsigned int max_part, mppe, conf_rec_len;
400 int currentdev;
401 int updates_pending;
402 struct vcl {
403 union {
404 char space[512];
405 struct {
406 struct vcl *next;
407 __u64 *lba_offset; /* location in 'conf' of
408 * the lba table */
409 unsigned int vcnum; /* index into ->virt */
410 __u64 *block_sizes; /* NULL if all the same */
411 };
412 };
413 struct vd_config conf;
414 } *conflist, *currentconf;
415 struct dl {
416 union {
417 char space[512];
418 struct {
419 struct dl *next;
420 int major, minor;
421 char *devname;
422 int fd;
423 unsigned long long size; /* sectors */
424 int pdnum; /* index in ->phys */
425 struct spare_assign *spare;
426 void *mdupdate; /* hold metadata update */
427
428 /* These fields used by auto-layout */
429 int raiddisk; /* slot to fill in autolayout */
430 __u64 esize;
431 };
432 };
433 struct disk_data disk;
434 struct vcl *vlist[0]; /* max_part in size */
435 } *dlist, *add_list;
436 };
437
438 #ifndef offsetof
439 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
440 #endif
441
442
443 static unsigned int calc_crc(void *buf, int len)
444 {
445 /* crcs are always at the same place as in the ddf_header */
446 struct ddf_header *ddf = buf;
447 __u32 oldcrc = ddf->crc;
448 __u32 newcrc;
449 ddf->crc = 0xffffffff;
450
451 newcrc = crc32(0, buf, len);
452 ddf->crc = oldcrc;
453 /* The crc is stored (like everything) bigendian, so convert
454 * here for simplicity
455 */
456 return __cpu_to_be32(newcrc);
457 }
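/* Usage sketch (editorial addition): every DDF section keeps its CRC at
 * the same offset as ddf_header.crc, so validation is uniformly
 *
 *	if (calc_crc(hdr, 512) != hdr->crc)
 *		return 0;
 *
 * as in load_ddf_header() and load_ddf_headers() below; calc_crc()
 * temporarily sets the field to 0xffffffff while computing, as above.
 */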
458
459 static int load_ddf_header(int fd, unsigned long long lba,
460 unsigned long long size,
461 int type,
462 struct ddf_header *hdr, struct ddf_header *anchor)
463 {
464 /* read a ddf header (primary or secondary) from fd/lba
465 * and check that it is consistent with anchor
466 * Need to check:
467 * magic, crc, guid, rev, and the LBAs; also header_type, and
468 * everything after header_type must be the same
469 */
470 if (lba >= size-1)
471 return 0;
472
473 if (lseek64(fd, lba<<9, 0) < 0)
474 return 0;
475
476 if (read(fd, hdr, 512) != 512)
477 return 0;
478
479 if (hdr->magic != DDF_HEADER_MAGIC)
480 return 0;
481 if (calc_crc(hdr, 512) != hdr->crc)
482 return 0;
483 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
484 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
485 anchor->primary_lba != hdr->primary_lba ||
486 anchor->secondary_lba != hdr->secondary_lba ||
487 hdr->type != type ||
488 memcmp(anchor->pad2, hdr->pad2, 512 -
489 offsetof(struct ddf_header, pad2)) != 0)
490 return 0;
491
492 /* Looks good enough to me... */
493 return 1;
494 }
495
496 static void *load_section(int fd, struct ddf_super *super, void *buf,
497 __u32 offset_be, __u32 len_be, int check)
498 {
499 unsigned long long offset = __be32_to_cpu(offset_be);
500 unsigned long long len = __be32_to_cpu(len_be);
501 int dofree = (buf == NULL);
502
503 if (check)
504 if (len != 2 && len != 8 && len != 32
505 && len != 128 && len != 512)
506 return NULL;
507
508 if (len > 1024)
509 return NULL;
510 if (buf) {
511 /* All pre-allocated sections are a single block */
512 if (len != 1)
513 return NULL;
514 } else if (posix_memalign(&buf, 512, len<<9) != 0)
515 buf = NULL;
516
517 if (!buf)
518 return NULL;
519
520 if (super->active->type == 1)
521 offset += __be64_to_cpu(super->active->primary_lba);
522 else
523 offset += __be64_to_cpu(super->active->secondary_lba);
524
525 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
526 if (dofree)
527 free(buf);
528 return NULL;
529 }
530 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
531 if (dofree)
532 free(buf);
533 return NULL;
534 }
535 return buf;
536 }
537
538 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
539 {
540 unsigned long long dsize;
541
542 get_dev_size(fd, NULL, &dsize);
543
544 if (lseek64(fd, dsize-512, 0) < 0) {
545 if (devname)
546 fprintf(stderr,
547 Name": Cannot seek to anchor block on %s: %s\n",
548 devname, strerror(errno));
549 return 1;
550 }
551 if (read(fd, &super->anchor, 512) != 512) {
552 if (devname)
553 fprintf(stderr,
554 Name ": Cannot read anchor block on %s: %s\n",
555 devname, strerror(errno));
556 return 1;
557 }
558 if (super->anchor.magic != DDF_HEADER_MAGIC) {
559 if (devname)
560 fprintf(stderr, Name ": no DDF anchor found on %s\n",
561 devname);
562 return 2;
563 }
564 if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
565 if (devname)
566 fprintf(stderr, Name ": bad CRC on anchor on %s\n",
567 devname);
568 return 2;
569 }
570 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
571 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
572 if (devname)
573 fprintf(stderr, Name ": can only support super revision"
574 " %.8s and earlier, not %.8s on %s\n",
575 DDF_REVISION_2, super->anchor.revision,devname);
576 return 2;
577 }
578 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
579 dsize >> 9, 1,
580 &super->primary, &super->anchor) == 0) {
581 if (devname)
582 fprintf(stderr,
583 Name ": Failed to load primary DDF header "
584 "on %s\n", devname);
585 return 2;
586 }
587 super->active = &super->primary;
588 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
589 dsize >> 9, 2,
590 &super->secondary, &super->anchor)) {
591 if ((__be32_to_cpu(super->primary.seq)
592 < __be32_to_cpu(super->secondary.seq) &&
593 !super->secondary.openflag)
594 || (__be32_to_cpu(super->primary.seq)
595 == __be32_to_cpu(super->secondary.seq) &&
596 super->primary.openflag && !super->secondary.openflag)
597 )
598 super->active = &super->secondary;
599 }
600 return 0;
601 }
602
603 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
604 {
605 void *ok;
606 ok = load_section(fd, super, &super->controller,
607 super->active->controller_section_offset,
608 super->active->controller_section_length,
609 0);
610 super->phys = load_section(fd, super, NULL,
611 super->active->phys_section_offset,
612 super->active->phys_section_length,
613 1);
614 super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;
615
616 super->virt = load_section(fd, super, NULL,
617 super->active->virt_section_offset,
618 super->active->virt_section_length,
619 1);
620 super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
621 if (!ok ||
622 !super->phys ||
623 !super->virt) {
624 free(super->phys);
625 free(super->virt);
626 super->phys = NULL;
627 super->virt = NULL;
628 return 2;
629 }
630 super->conflist = NULL;
631 super->dlist = NULL;
632
633 super->max_part = __be16_to_cpu(super->active->max_partitions);
634 super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
635 super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
636 return 0;
637 }
638
639 static int load_ddf_local(int fd, struct ddf_super *super,
640 char *devname, int keep)
641 {
642 struct dl *dl;
643 struct stat stb;
644 char *conf;
645 unsigned int i;
646 unsigned int confsec;
647 int vnum;
648 unsigned int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
649 unsigned long long dsize;
650
651 /* First the local disk info */
652 if (posix_memalign((void**)&dl, 512,
653 sizeof(*dl) +
654 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
655 fprintf(stderr, Name ": %s could not allocate disk info buffer\n",
656 __func__);
657 return 1;
658 }
659
660 load_section(fd, super, &dl->disk,
661 super->active->data_section_offset,
662 super->active->data_section_length,
663 0);
664 dl->devname = devname ? strdup(devname) : NULL;
665
666 fstat(fd, &stb);
667 dl->major = major(stb.st_rdev);
668 dl->minor = minor(stb.st_rdev);
669 dl->next = super->dlist;
670 dl->fd = keep ? fd : -1;
671
672 dl->size = 0;
673 if (get_dev_size(fd, devname, &dsize))
674 dl->size = dsize >> 9;
675 dl->spare = NULL;
676 for (i = 0 ; i < super->max_part ; i++)
677 dl->vlist[i] = NULL;
678 super->dlist = dl;
679 dl->pdnum = -1;
680 for (i = 0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
681 if (memcmp(super->phys->entries[i].guid,
682 dl->disk.guid, DDF_GUID_LEN) == 0)
683 dl->pdnum = i;
684
685 /* Now the config list. */
686 /* 'conf' is an array of config entries, some of which are
687 * probably invalid. Those which are good need to be copied into
688 * the conflist
689 */
690
691 conf = load_section(fd, super, NULL,
692 super->active->config_section_offset,
693 super->active->config_section_length,
694 0);
695
696 vnum = 0;
697 for (confsec = 0;
698 confsec < __be32_to_cpu(super->active->config_section_length);
699 confsec += super->conf_rec_len) {
700 struct vd_config *vd =
701 (struct vd_config *)((char*)conf + confsec*512);
702 struct vcl *vcl;
703
704 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
705 if (dl->spare)
706 continue;
707 if (posix_memalign((void**)&dl->spare, 512,
708 super->conf_rec_len*512) != 0) {
709 fprintf(stderr, Name
710 ": %s could not allocate spare info buf\n",
711 __func__);
712 return 1;
713 }
714
715 memcpy(dl->spare, vd, super->conf_rec_len*512);
716 continue;
717 }
718 if (vd->magic != DDF_VD_CONF_MAGIC)
719 continue;
720 for (vcl = super->conflist; vcl; vcl = vcl->next) {
721 if (memcmp(vcl->conf.guid,
722 vd->guid, DDF_GUID_LEN) == 0)
723 break;
724 }
725
726 if (vcl) {
727 dl->vlist[vnum++] = vcl;
728 if (__be32_to_cpu(vd->seqnum) <=
729 __be32_to_cpu(vcl->conf.seqnum))
730 continue;
731 } else {
732 if (posix_memalign((void**)&vcl, 512,
733 (super->conf_rec_len*512 +
734 offsetof(struct vcl, conf))) != 0) {
735 fprintf(stderr, Name
736 ": %s could not allocate vcl buf\n",
737 __func__);
738 return 1;
739 }
740 vcl->next = super->conflist;
741 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
742 super->conflist = vcl;
743 dl->vlist[vnum++] = vcl;
744 }
745 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
746 vcl->lba_offset = (__u64*)
747 &vcl->conf.phys_refnum[super->mppe];
748
749 for (i=0; i < max_virt_disks ; i++)
750 if (memcmp(super->virt->entries[i].guid,
751 vcl->conf.guid, DDF_GUID_LEN)==0)
752 break;
753 if (i < max_virt_disks)
754 vcl->vcnum = i;
755 }
756 free(conf);
757
758 return 0;
759 }
760
761 #ifndef MDASSEMBLE
762 static int load_super_ddf_all(struct supertype *st, int fd,
763 void **sbp, char *devname, int keep_fd);
764 #endif
765
766 static void free_super_ddf(struct supertype *st);
767
768 static int load_super_ddf(struct supertype *st, int fd,
769 char *devname)
770 {
771 unsigned long long dsize;
772 struct ddf_super *super;
773 int rv;
774
775 #ifndef MDASSEMBLE
776 /* if 'fd' is a container, load metadata from all the devices */
777 if (load_super_ddf_all(st, fd, &st->sb, devname, 1) == 0)
778 return 0;
779 #endif
780 if (st->subarray[0])
781 return 1; /* FIXME Is this correct */
782
783 if (get_dev_size(fd, devname, &dsize) == 0)
784 return 1;
785
786 if (test_partition(fd))
787 /* DDF is not allowed on partitions */
788 return 1;
789
790 /* 32M is a lower bound */
791 if (dsize <= 32*1024*1024) {
792 if (devname)
793 fprintf(stderr,
794 Name ": %s is too small for ddf: "
795 "size is %llu sectors.\n",
796 devname, dsize>>9);
797 return 1;
798 }
799 if (dsize & 511) {
800 if (devname)
801 fprintf(stderr,
802 Name ": %s is an odd size for ddf: "
803 "size is %llu bytes.\n",
804 devname, dsize);
805 return 1;
806 }
807
808 free_super_ddf(st);
809
810 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
811 fprintf(stderr, Name ": malloc of %zu failed.\n",
812 sizeof(*super));
813 return 1;
814 }
815 memset(super, 0, sizeof(*super));
816
817 rv = load_ddf_headers(fd, super, devname);
818 if (rv) {
819 free(super);
820 return rv;
821 }
822
823 /* Have valid headers and have chosen the best. Let's read in the rest */
824
825 rv = load_ddf_global(fd, super, devname);
826
827 if (rv) {
828 if (devname)
829 fprintf(stderr,
830 Name ": Failed to load all information "
831 "sections on %s\n", devname);
832 free(super);
833 return rv;
834 }
835
836 rv = load_ddf_local(fd, super, devname, 0);
837
838 if (rv) {
839 if (devname)
840 fprintf(stderr,
841 Name ": Failed to load all information "
842 "sections on %s\n", devname);
843 free(super);
844 return rv;
845 }
846
847 if (st->subarray[0]) {
848 unsigned long val;
849 struct vcl *v;
850 char *ep;
851
852 val = strtoul(st->subarray, &ep, 10);
853 if (*ep != '\0') {
854 free(super);
855 return 1;
856 }
857
858 for (v = super->conflist; v; v = v->next)
859 if (v->vcnum == val)
860 super->currentconf = v;
861 if (!super->currentconf) {
862 free(super);
863 return 1;
864 }
865 }
866
867 /* Should possibly check the sections .... */
868
869 st->sb = super;
870 if (st->ss == NULL) {
871 st->ss = &super_ddf;
872 st->minor_version = 0;
873 st->max_devs = 512;
874 }
875 st->loaded_container = 0;
876 return 0;
877
878 }
879
880 static void free_super_ddf(struct supertype *st)
881 {
882 struct ddf_super *ddf = st->sb;
883 if (ddf == NULL)
884 return;
885 free(ddf->phys);
886 free(ddf->virt);
887 while (ddf->conflist) {
888 struct vcl *v = ddf->conflist;
889 ddf->conflist = v->next;
890 if (v->block_sizes)
891 free(v->block_sizes);
892 free(v);
893 }
894 while (ddf->dlist) {
895 struct dl *d = ddf->dlist;
896 ddf->dlist = d->next;
897 if (d->fd >= 0)
898 close(d->fd);
899 if (d->spare)
900 free(d->spare);
901 free(d);
902 }
903 while (ddf->add_list) {
904 struct dl *d = ddf->add_list;
905 ddf->add_list = d->next;
906 if (d->fd >= 0)
907 close(d->fd);
908 if (d->spare)
909 free(d->spare);
910 free(d);
911 }
912 free(ddf);
913 st->sb = NULL;
914 }
915
916 static struct supertype *match_metadata_desc_ddf(char *arg)
917 {
918 /* 'ddf' only supports containers */
919 struct supertype *st;
920 if (strcmp(arg, "ddf") != 0 &&
921 strcmp(arg, "default") != 0
922 )
923 return NULL;
924
925 st = malloc(sizeof(*st));
926 memset(st, 0, sizeof(*st));
927 st->ss = &super_ddf;
928 st->max_devs = 512;
929 st->minor_version = 0;
930 st->sb = NULL;
931 return st;
932 }
933
934
935 #ifndef MDASSEMBLE
936
937 static mapping_t ddf_state[] = {
938 { "Optimal", 0},
939 { "Degraded", 1},
940 { "Deleted", 2},
941 { "Missing", 3},
942 { "Failed", 4},
943 { "Partially Optimal", 5},
944 { "-reserved-", 6},
945 { "-reserved-", 7},
946 { NULL, 0}
947 };
948
949 static mapping_t ddf_init_state[] = {
950 { "Not Initialised", 0},
951 { "QuickInit in Progress", 1},
952 { "Fully Initialised", 2},
953 { "*UNKNOWN*", 3},
954 { NULL, 0}
955 };
956 static mapping_t ddf_access[] = {
957 { "Read/Write", 0},
958 { "Reserved", 1},
959 { "Read Only", 2},
960 { "Blocked (no access)", 3},
961 { NULL ,0}
962 };
963
964 static mapping_t ddf_level[] = {
965 { "RAID0", DDF_RAID0},
966 { "RAID1", DDF_RAID1},
967 { "RAID3", DDF_RAID3},
968 { "RAID4", DDF_RAID4},
969 { "RAID5", DDF_RAID5},
970 { "RAID1E",DDF_RAID1E},
971 { "JBOD", DDF_JBOD},
972 { "CONCAT",DDF_CONCAT},
973 { "RAID5E",DDF_RAID5E},
974 { "RAID5EE",DDF_RAID5EE},
975 { "RAID6", DDF_RAID6},
976 { NULL, 0}
977 };
978 static mapping_t ddf_sec_level[] = {
979 { "Striped", DDF_2STRIPED},
980 { "Mirrored", DDF_2MIRRORED},
981 { "Concat", DDF_2CONCAT},
982 { "Spanned", DDF_2SPANNED},
983 { NULL, 0}
984 };
985 #endif
986
987 struct num_mapping {
988 int num1, num2;
989 };
990 static struct num_mapping ddf_level_num[] = {
991 { DDF_RAID0, 0 },
992 { DDF_RAID1, 1 },
993 { DDF_RAID3, LEVEL_UNSUPPORTED },
994 { DDF_RAID4, 4 },
995 { DDF_RAID5, 5 },
996 { DDF_RAID1E, LEVEL_UNSUPPORTED },
997 { DDF_JBOD, LEVEL_UNSUPPORTED },
998 { DDF_CONCAT, LEVEL_LINEAR },
999 { DDF_RAID5E, LEVEL_UNSUPPORTED },
1000 { DDF_RAID5EE, LEVEL_UNSUPPORTED },
1001 { DDF_RAID6, 6},
1002 { MAXINT, MAXINT }
1003 };
1004
1005 static int map_num1(struct num_mapping *map, int num)
1006 {
1007 int i;
1008 for (i=0 ; map[i].num1 != MAXINT; i++)
1009 if (map[i].num1 == num)
1010 break;
1011 return map[i].num2;
1012 }
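/* Usage sketch (editorial addition): map_num1() walks the table until it
 * hits the matching num1 or the MAXINT sentinel, so
 *
 *	int level = map_num1(ddf_level_num, DDF_RAID5);
 *
 * yields 5, while an unlisted PRL value yields MAXINT.
 */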
1013
1014 static int all_ff(char *guid)
1015 {
1016 int i;
1017 for (i = 0; i < DDF_GUID_LEN; i++)
1018 if (guid[i] != (char)0xff)
1019 return 0;
1020 return 1;
1021 }
1022
1023 #ifndef MDASSEMBLE
1024 static void print_guid(char *guid, int tstamp)
1025 {
1026 /* GUIDs are part (or all) ASCII and part binary.
1027 * They tend to be space padded.
1028 * We print the GUID in HEX, then in parentheses add
1029 * any initial ASCII sequence, and a possible
1030 * time stamp from bytes 16-19
1031 */
1032 int l = DDF_GUID_LEN;
1033 int i;
1034
1035 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1036 if ((i&3)==0 && i != 0) printf(":");
1037 printf("%02X", guid[i]&255);
1038 }
1039
1040 printf("\n (");
1041 while (l && guid[l-1] == ' ')
1042 l--;
1043 for (i=0 ; i<l ; i++) {
1044 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1045 fputc(guid[i], stdout);
1046 else
1047 break;
1048 }
1049 if (tstamp) {
1050 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1051 char tbuf[100];
1052 struct tm *tm;
1053 tm = localtime(&then);
1054 strftime(tbuf, 100, " %D %T",tm);
1055 fputs(tbuf, stdout);
1056 }
1057 printf(")");
1058 }
1059
1060 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1061 {
1062 int crl = sb->conf_rec_len;
1063 struct vcl *vcl;
1064
1065 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1066 unsigned int i;
1067 struct vd_config *vc = &vcl->conf;
1068
1069 if (calc_crc(vc, crl*512) != vc->crc)
1070 continue;
1071 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1072 continue;
1073
1074 /* Ok, we know about this VD, let's give more details */
1075 printf(" Raid Devices[%d] : %d (", n,
1076 __be16_to_cpu(vc->prim_elmnt_count));
1077 for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++) {
1078 int j;
1079 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1080 for (j=0; j<cnt; j++)
1081 if (vc->phys_refnum[i] == sb->phys->entries[j].refnum)
1082 break;
1083 if (i) printf(" ");
1084 if (j < cnt)
1085 printf("%d", j);
1086 else
1087 printf("--");
1088 }
1089 printf(")\n");
1090 if (vc->chunk_shift != 255)
1091 printf(" Chunk Size[%d] : %d sectors\n", n,
1092 1 << vc->chunk_shift);
1093 printf(" Raid Level[%d] : %s\n", n,
1094 map_num(ddf_level, vc->prl)?:"-unknown-");
1095 if (vc->sec_elmnt_count != 1) {
1096 printf(" Secondary Position[%d] : %d of %d\n", n,
1097 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1098 printf(" Secondary Level[%d] : %s\n", n,
1099 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1100 }
1101 printf(" Device Size[%d] : %llu\n", n,
1102 (unsigned long long)__be64_to_cpu(vc->blocks)/2);
1103 printf(" Array Size[%d] : %llu\n", n,
1104 (unsigned long long)__be64_to_cpu(vc->array_blocks)/2);
1105 }
1106 }
1107
1108 static void examine_vds(struct ddf_super *sb)
1109 {
1110 int cnt = __be16_to_cpu(sb->virt->populated_vdes);
1111 int i;
1112 printf(" Virtual Disks : %d\n", cnt);
1113
1114 for (i=0; i<cnt; i++) {
1115 struct virtual_entry *ve = &sb->virt->entries[i];
1116 printf("\n");
1117 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1118 printf("\n");
1119 printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
1120 printf(" state[%d] : %s, %s%s\n", i,
1121 map_num(ddf_state, ve->state & 7),
1122 (ve->state & 8) ? "Morphing, ": "",
1123 (ve->state & 16)? "Not Consistent" : "Consistent");
1124 printf(" init state[%d] : %s\n", i,
1125 map_num(ddf_init_state, ve->init_state&3));
1126 printf(" access[%d] : %s\n", i,
1127 map_num(ddf_access, (ve->init_state>>6) & 3));
1128 printf(" Name[%d] : %.16s\n", i, ve->name);
1129 examine_vd(i, sb, ve->guid);
1130 }
1131 if (cnt) printf("\n");
1132 }
1133
1134 static void examine_pds(struct ddf_super *sb)
1135 {
1136 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1137 int i;
1138 struct dl *dl;
1139 printf(" Physical Disks : %d\n", cnt);
1140 printf(" Number RefNo Size Device Type/State\n");
1141
1142 for (i=0 ; i<cnt ; i++) {
1143 struct phys_disk_entry *pd = &sb->phys->entries[i];
1144 int type = __be16_to_cpu(pd->type);
1145 int state = __be16_to_cpu(pd->state);
1146
1147 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1148 //printf("\n");
1149 printf(" %3d %08x ", i,
1150 __be32_to_cpu(pd->refnum));
1151 printf("%8lluK ",
1152 (unsigned long long)__be64_to_cpu(pd->config_size)>>1);
1153 for (dl = sb->dlist; dl ; dl = dl->next) {
1154 if (dl->disk.refnum == pd->refnum) {
1155 char *dv = map_dev(dl->major, dl->minor, 0);
1156 if (dv) {
1157 printf("%-15s", dv);
1158 break;
1159 }
1160 }
1161 }
1162 if (!dl)
1163 printf("%15s","");
1164 printf(" %s%s%s%s%s",
1165 (type&2) ? "active":"",
1166 (type&4) ? "Global-Spare":"",
1167 (type&8) ? "spare" : "",
1168 (type&16)? ", foreign" : "",
1169 (type&32)? "pass-through" : "");
1170 if (state & DDF_Failed)
1171 /* This over-rides these three */
1172 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1173 printf("/%s%s%s%s%s%s%s",
1174 (state&1)? "Online": "Offline",
1175 (state&2)? ", Failed": "",
1176 (state&4)? ", Rebuilding": "",
1177 (state&8)? ", in-transition": "",
1178 (state&16)? ", SMART-errors": "",
1179 (state&32)? ", Unrecovered-Read-Errors": "",
1180 (state&64)? ", Missing" : "");
1181 printf("\n");
1182 }
1183 }
1184
1185 static void examine_super_ddf(struct supertype *st, char *homehost)
1186 {
1187 struct ddf_super *sb = st->sb;
1188
1189 printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
1190 printf(" Version : %.8s\n", sb->anchor.revision);
1191 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1192 printf("\n");
1193 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1194 printf("\n");
1195 printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
1196 printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
1197 ?"yes" : "no");
1198 examine_vds(sb);
1199 examine_pds(sb);
1200 }
1201
1202 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info);
1203
1204 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1205
1206 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1207 {
1208 /* We just write a generic DDF ARRAY entry
1209 */
1210 struct mdinfo info;
1211 char nbuf[64];
1212 getinfo_super_ddf(st, &info);
1213 fname_from_uuid(st, &info, nbuf, ':');
1214
1215 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1216 }
1217
1218 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1219 {
1220 /* We just write a generic DDF ARRAY entry
1221 */
1222 struct ddf_super *ddf = st->sb;
1223 struct mdinfo info;
1224 unsigned int i;
1225 char nbuf[64];
1226 getinfo_super_ddf(st, &info);
1227 fname_from_uuid(st, &info, nbuf, ':');
1228
1229 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
1230 struct virtual_entry *ve = &ddf->virt->entries[i];
1231 struct vcl vcl;
1232 char nbuf1[64];
1233 if (all_ff(ve->guid))
1234 continue;
1235 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1236 ddf->currentconf =&vcl;
1237 uuid_from_super_ddf(st, info.uuid);
1238 fname_from_uuid(st, &info, nbuf1, ':');
1239 printf("ARRAY container=%s member=%d UUID=%s\n",
1240 nbuf+5, i, nbuf1+5);
1241 }
1242 }
1243
1244 static void export_examine_super_ddf(struct supertype *st)
1245 {
1246 struct mdinfo info;
1247 char nbuf[64];
1248 getinfo_super_ddf(st, &info);
1249 fname_from_uuid(st, &info, nbuf, ':');
1250 printf("MD_METADATA=ddf\n");
1251 printf("MD_LEVEL=container\n");
1252 printf("MD_UUID=%s\n", nbuf+5);
1253 }
1254
1255
1256 static void detail_super_ddf(struct supertype *st, char *homehost)
1257 {
1258 /* FIXME later
1259 * Could print DDF GUID
1260 * Need to find which array
1261 * If whole, briefly list all arrays
1262 * If one, give name
1263 */
1264 }
1265
1266 static void brief_detail_super_ddf(struct supertype *st)
1267 {
1268 /* FIXME I really need to know which array we are detailing.
1269 * Can that be stored in ddf_super??
1270 */
1271 // struct ddf_super *ddf = st->sb;
1272 struct mdinfo info;
1273 char nbuf[64];
1274 getinfo_super_ddf(st, &info);
1275 fname_from_uuid(st, &info, nbuf,':');
1276 printf(" UUID=%s", nbuf + 5);
1277 }
1278 #endif
1279
1280 static int match_home_ddf(struct supertype *st, char *homehost)
1281 {
1282 /* It matches 'this' host if the controller is a
1283 * Linux-MD controller with vendor_data matching
1284 * the hostname
1285 */
1286 struct ddf_super *ddf = st->sb;
1287 unsigned int len;
1288
1289 if (!homehost)
1290 return 0;
1291 len = strlen(homehost);
1292
1293 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1294 len < sizeof(ddf->controller.vendor_data) &&
1295 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1296 ddf->controller.vendor_data[len] == 0);
1297 }
1298
1299 #ifndef MDASSEMBLE
1300 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst)
1301 {
1302 struct vcl *v;
1303
1304 for (v = ddf->conflist; v; v = v->next)
1305 if (inst == v->vcnum)
1306 return &v->conf;
1307 return NULL;
1308 }
1309 #endif
1310
1311 static int find_phys(struct ddf_super *ddf, __u32 phys_refnum)
1312 {
1313 /* Find the entry in phys_disk which has the given refnum
1314 * and return its index
1315 */
1316 unsigned int i;
1317 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1318 if (ddf->phys->entries[i].refnum == phys_refnum)
1319 return i;
1320 return -1;
1321 }
1322
1323 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1324 {
1325 /* The uuid returned here is used for:
1326 * uuid to put into bitmap file (Create, Grow)
1327 * uuid for backup header when saving critical section (Grow)
1328 * comparing uuids when re-adding a device into an array
1329 * In these cases the uuid required is that of the data-array,
1330 * not the device-set.
1331 * uuid to recognise same set when adding a missing device back
1332 * to an array. This is a uuid for the device-set.
1333 *
1334 * For each of these we can make do with a truncated
1335 * or hashed uuid rather than the original, as long as
1336 * everyone agrees.
1337 * In the case of SVD we assume the BVD is of interest,
1338 * though that might not be the case if a bitmap were made for
1339 * a mirrored SVD - worry about that later.
1340 * So we need to find the VD configuration record for the
1341 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1342 * The first 16 bytes of the sha1 of these is used.
1343 */
1344 struct ddf_super *ddf = st->sb;
1345 struct vcl *vcl = ddf->currentconf;
1346 char *guid;
1347 char buf[20];
1348 struct sha1_ctx ctx;
1349
1350 if (vcl)
1351 guid = vcl->conf.guid;
1352 else
1353 guid = ddf->anchor.guid;
1354
1355 sha1_init_ctx(&ctx);
1356 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1357 sha1_finish_ctx(&ctx, buf);
1358 memcpy(uuid, buf, 4*4);
1359 }
1360
1361 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info);
1362
1363 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info)
1364 {
1365 struct ddf_super *ddf = st->sb;
1366
1367 if (ddf->currentconf) {
1368 getinfo_super_ddf_bvd(st, info);
1369 return;
1370 }
1371
1372 info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
1373 info->array.level = LEVEL_CONTAINER;
1374 info->array.layout = 0;
1375 info->array.md_minor = -1;
1376 info->array.ctime = DECADE + __be32_to_cpu(*(__u32*)
1377 (ddf->anchor.guid+16));
1378 info->array.utime = 0;
1379 info->array.chunk_size = 0;
1380 info->container_enough = 1;
1381
1382
1383 info->disk.major = 0;
1384 info->disk.minor = 0;
1385 if (ddf->dlist) {
1386 info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
1387 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1388
1389 info->data_offset = __be64_to_cpu(ddf->phys->
1390 entries[info->disk.raid_disk].
1391 config_size);
1392 info->component_size = ddf->dlist->size - info->data_offset;
1393 } else {
1394 info->disk.number = -1;
1395 info->disk.raid_disk = -1;
1396 // info->disk.raid_disk = find refnum in the table and use index;
1397 }
1398 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1399
1400
1401 info->recovery_start = MaxSector;
1402 info->reshape_active = 0;
1403 info->name[0] = 0;
1404
1405 info->array.major_version = -1;
1406 info->array.minor_version = -2;
1407 strcpy(info->text_version, "ddf");
1408 info->safe_mode_delay = 0;
1409
1410 uuid_from_super_ddf(st, info->uuid);
1411
1412 }
1413
1414 static int rlq_to_layout(int rlq, int prl, int raiddisks);
1415
1416 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info)
1417 {
1418 struct ddf_super *ddf = st->sb;
1419 struct vcl *vc = ddf->currentconf;
1420 int cd = ddf->currentdev;
1421 int j;
1422 struct dl *dl;
1423
1424 /* FIXME this returns BVD info - what if we want SVD ?? */
1425
1426 info->array.raid_disks = __be16_to_cpu(vc->conf.prim_elmnt_count);
1427 info->array.level = map_num1(ddf_level_num, vc->conf.prl);
1428 info->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
1429 info->array.raid_disks);
1430 info->array.md_minor = -1;
1431 info->array.ctime = DECADE +
1432 __be32_to_cpu(*(__u32*)(vc->conf.guid+16));
1433 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1434 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1435 info->custom_array_size = 0;
1436
1437 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1438 info->data_offset = __be64_to_cpu(vc->lba_offset[cd]);
1439 if (vc->block_sizes)
1440 info->component_size = vc->block_sizes[cd];
1441 else
1442 info->component_size = __be64_to_cpu(vc->conf.blocks);
1443 }
1444
1445 for (dl = ddf->dlist; dl ; dl = dl->next)
1446 if (dl->raiddisk == info->disk.raid_disk)
1447 break;
1448 info->disk.major = 0;
1449 info->disk.minor = 0;
1450 if (dl) {
1451 info->disk.major = dl->major;
1452 info->disk.minor = dl->minor;
1453 }
1454 // info->disk.number = __be32_to_cpu(ddf->disk.refnum);
1455 // info->disk.raid_disk = find refnum in the table and use index;
1456 // info->disk.state = ???;
1457
1458 info->container_member = ddf->currentconf->vcnum;
1459
1460 info->recovery_start = MaxSector;
1461 info->resync_start = 0;
1462 info->reshape_active = 0;
1463 if (!(ddf->virt->entries[info->container_member].state
1464 & DDF_state_inconsistent) &&
1465 (ddf->virt->entries[info->container_member].init_state
1466 & DDF_initstate_mask)
1467 == DDF_init_full)
1468 info->resync_start = MaxSector;
1469
1470 uuid_from_super_ddf(st, info->uuid);
1471
1472 info->array.major_version = -1;
1473 info->array.minor_version = -2;
1474 sprintf(info->text_version, "/%s/%s",
1475 devnum2devname(st->container_dev),
1476 st->subarray);
1477 info->safe_mode_delay = 200;
1478
1479 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1480 info->name[16]=0;
1481 for(j=0; j<16; j++)
1482 if (info->name[j] == ' ')
1483 info->name[j] = 0;
1484 }
1485
1486
1487 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
1488 char *update,
1489 char *devname, int verbose,
1490 int uuid_set, char *homehost)
1491 {
1492 /* For 'assemble' and 'force' we need to return non-zero if any
1493 * change was made. For others, the return value is ignored.
1494 * Update options are:
1495 * force-one : This device looks a bit old but needs to be included,
1496 * update age info appropriately.
1497 * assemble: clear any 'faulty' flag to allow this device to
1498 * be assembled.
1499 * force-array: Array is degraded but being forced, mark it clean
1500 * if that will be needed to assemble it.
1501 *
1502 * newdev: not used ????
1503 * grow: Array has gained a new device - this is currently for
1504 * linear only
1505 * resync: mark as dirty so a resync will happen.
1506 * uuid: Change the uuid of the array to match what is given
1507 * homehost: update the recorded homehost
1508 * name: update the name - preserving the homehost
1509 * _reshape_progress: record new reshape_progress position.
1510 *
1511 * Following are not relevant for this version:
1512 * sparc2.2 : update from old dodgy metadata
1513 * super-minor: change the preferred_minor number
1514 * summaries: update redundant counters.
1515 */
1516 int rv = 0;
1517 // struct ddf_super *ddf = st->sb;
1518 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
1519 // struct virtual_entry *ve = find_ve(ddf);
1520
1521 /* we don't need to handle "force-*" or "assemble" as
1522 * there is no need to 'trick' the kernel. When the metadata is
1523 * first updated to activate the array, all the implied modifications
1524 * will just happen.
1525 */
1526
1527 if (strcmp(update, "grow") == 0) {
1528 /* FIXME */
1529 }
1530 if (strcmp(update, "resync") == 0) {
1531 // info->resync_checkpoint = 0;
1532 }
1533 /* We ignore UUID updates as they make even less sense
1534 * with DDF
1535 */
1536 if (strcmp(update, "homehost") == 0) {
1537 /* homehost is stored in controller->vendor_data,
1538 * or it is when we are the vendor
1539 */
1540 // if (info->vendor_is_local)
1541 // strcpy(ddf->controller.vendor_data, homehost);
1542 }
1543 if (strcmp(update, "name") == 0) {
1544 /* name is stored in virtual_entry->name */
1545 // memset(ve->name, ' ', 16);
1546 // strncpy(ve->name, info->name, 16);
1547 }
1548 if (strcmp(update, "_reshape_progress") == 0) {
1549 /* We don't support reshape yet */
1550 }
1551
1552 // update_all_csum(ddf);
1553
1554 return rv;
1555 }
1556
1557 static void make_header_guid(char *guid)
1558 {
1559 __u32 stamp;
1560 /* Create a DDF Header or Virtual Disk GUID */
1561
1562 /* 24 bytes of fiction required.
1563 * first 8 are a 'vendor-id' - "Linux-MD"
1564 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
1565 * Remaining 8 are a timestamp plus a random number
1566 */
1567 memcpy(guid, T10, sizeof(T10));
1568 stamp = __cpu_to_be32(0xdeadbeef);
1569 memcpy(guid+8, &stamp, 4);
1570 stamp = __cpu_to_be32(0);
1571 memcpy(guid+12, &stamp, 4);
1572 stamp = __cpu_to_be32(time(0) - DECADE);
1573 memcpy(guid+16, &stamp, 4);
1574 stamp = random32();
1575 memcpy(guid+20, &stamp, 4);
1576 }
1577
1578 static int init_super_ddf_bvd(struct supertype *st,
1579 mdu_array_info_t *info,
1580 unsigned long long size,
1581 char *name, char *homehost,
1582 int *uuid);
1583
1584 static int init_super_ddf(struct supertype *st,
1585 mdu_array_info_t *info,
1586 unsigned long long size, char *name, char *homehost,
1587 int *uuid)
1588 {
1589 /* This is primarily called by Create when creating a new array.
1590 * We will then get add_to_super called for each component, and then
1591 * write_init_super called to write it out to each device.
1592 * For DDF, Create can create on fresh devices or on a pre-existing
1593 * array.
1594 * To create on a pre-existing array a different method will be called.
1595 * This one is just for fresh drives.
1596 *
1597 * We need to create the entire 'ddf' structure which includes:
1598 * DDF headers - these are easy.
1599 * Controller data - a Sector describing this controller .. not that
1600 * this is a controller exactly.
1601 * Physical Disk Record - one entry per device, so
1602 * leave plenty of space.
1603 * Virtual Disk Records - again, just leave plenty of space.
1604 * This just lists VDs, doesn't give details
1605 * Config records - describes the VDs that use this disk
1606 * DiskData - describes 'this' device.
1607 * BadBlockManagement - empty
1608 * Diag Space - empty
1609 * Vendor Logs - Could we put bitmaps here?
1610 *
1611 */
1612 struct ddf_super *ddf;
1613 char hostname[17];
1614 int hostlen;
1615 int max_phys_disks, max_virt_disks;
1616 unsigned long long sector;
1617 int clen;
1618 int i;
1619 int pdsize, vdsize;
1620 struct phys_disk *pd;
1621 struct virtual_disk *vd;
1622
1623 if (st->sb)
1624 return init_super_ddf_bvd(st, info, size, name, homehost, uuid);
1625
1626 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
1627 fprintf(stderr, Name ": %s could not allocate superblock\n", __func__);
1628 return 0;
1629 }
1630 memset(ddf, 0, sizeof(*ddf));
1631 ddf->dlist = NULL; /* no physical disks yet */
1632 ddf->conflist = NULL; /* No virtual disks yet */
1633 st->sb = ddf;
1634
1635 if (info == NULL) {
1636 /* zeroing superblock */
1637 return 0;
1638 }
1639
1640 /* At least 32MB *must* be reserved for the ddf. So let's just
1641 * start 32MB from the end, and put the primary header there.
1642 * Don't do secondary for now.
1643 * We don't know exactly where that will be yet as it could be
1644 * different on each device. So just set up the lengths.
1645 *
1646 */
1647
1648 ddf->anchor.magic = DDF_HEADER_MAGIC;
1649 make_header_guid(ddf->anchor.guid);
1650
1651 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
1652 ddf->anchor.seq = __cpu_to_be32(1);
1653 ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
1654 ddf->anchor.openflag = 0xFF;
1655 ddf->anchor.foreignflag = 0;
1656 ddf->anchor.enforcegroups = 0; /* Is this best?? */
1657 ddf->anchor.pad0 = 0xff;
1658 memset(ddf->anchor.pad1, 0xff, 12);
1659 memset(ddf->anchor.header_ext, 0xff, 32);
1660 ddf->anchor.primary_lba = ~(__u64)0;
1661 ddf->anchor.secondary_lba = ~(__u64)0;
1662 ddf->anchor.type = DDF_HEADER_ANCHOR;
1663 memset(ddf->anchor.pad2, 0xff, 3);
1664 ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
1665 ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
1666 of 32M reserved.. */
1667 max_phys_disks = 1023; /* Should be enough */
1668 ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
1669 max_virt_disks = 255;
1670 ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
1671 ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
1672 ddf->max_part = 64;
1673 ddf->mppe = 256;
1674 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
1675 ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
1676 ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
1677 memset(ddf->anchor.pad3, 0xff, 54);
1678 /* controller sections is one sector long immediately
1679 * after the ddf header */
1680 sector = 1;
1681 ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
1682 ddf->anchor.controller_section_length = __cpu_to_be32(1);
1683 sector += 1;
1684
1685 /* phys is 8 sectors after that */
1686 pdsize = ROUND_UP(sizeof(struct phys_disk) +
1687 sizeof(struct phys_disk_entry)*max_phys_disks,
1688 512);
1689 switch(pdsize/512) {
1690 case 2: case 8: case 32: case 128: case 512: break;
1691 default: abort();
1692 }
1693 ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
1694 ddf->anchor.phys_section_length =
1695 __cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
1696 sector += pdsize/512;
1697
1698 /* virt is another 32 sectors */
1699 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
1700 sizeof(struct virtual_entry) * max_virt_disks,
1701 512);
1702 switch(vdsize/512) {
1703 case 2: case 8: case 32: case 128: case 512: break;
1704 default: abort();
1705 }
1706 ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
1707 ddf->anchor.virt_section_length =
1708 __cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
1709 sector += vdsize/512;
1710
1711 clen = ddf->conf_rec_len * (ddf->max_part+1);
1712 ddf->anchor.config_section_offset = __cpu_to_be32(sector);
1713 ddf->anchor.config_section_length = __cpu_to_be32(clen);
1714 sector += clen;
1715
1716 ddf->anchor.data_section_offset = __cpu_to_be32(sector);
1717 ddf->anchor.data_section_length = __cpu_to_be32(1);
1718 sector += 1;
1719
1720 ddf->anchor.bbm_section_length = __cpu_to_be32(0);
1721 ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
1722 ddf->anchor.diag_space_length = __cpu_to_be32(0);
1723 ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
1724 ddf->anchor.vendor_length = __cpu_to_be32(0);
1725 ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);
1726
1727 memset(ddf->anchor.pad4, 0xff, 256);
1728
1729 memcpy(&ddf->primary, &ddf->anchor, 512);
1730 memcpy(&ddf->secondary, &ddf->anchor, 512);
1731
1732 ddf->primary.openflag = 1; /* I guess.. */
1733 ddf->primary.type = DDF_HEADER_PRIMARY;
1734
1735 ddf->secondary.openflag = 1; /* I guess.. */
1736 ddf->secondary.type = DDF_HEADER_SECONDARY;
1737
1738 ddf->active = &ddf->primary;
1739
1740 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
1741
1742 /* 24 more bytes of fiction required.
1743 * first 8 are a 'vendor-id' - "Linux-MD"
1744 * Remaining 16 are serial number.... maybe a hostname would do?
1745 */
1746 memcpy(ddf->controller.guid, T10, sizeof(T10));
1747 gethostname(hostname, sizeof(hostname));
1748 hostname[sizeof(hostname) - 1] = 0;
1749 hostlen = strlen(hostname);
1750 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
1751 for (i = strlen(T10) ; i+hostlen < 24; i++)
1752 ddf->controller.guid[i] = ' ';
1753
1754 ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
1755 ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
1756 ddf->controller.type.sub_vendor_id = 0;
1757 ddf->controller.type.sub_device_id = 0;
1758 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
1759 memset(ddf->controller.pad, 0xff, 8);
1760 memset(ddf->controller.vendor_data, 0xff, 448);
1761 if (homehost && strlen(homehost) < 440)
1762 strcpy((char*)ddf->controller.vendor_data, homehost);
1763
1764 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
1765 fprintf(stderr, Name ": %s could not allocate pd\n", __func__);
1766 return 0;
1767 }
1768 ddf->phys = pd;
1769 ddf->pdsize = pdsize;
1770
1771 memset(pd, 0xff, pdsize);
1772 memset(pd, 0, sizeof(*pd));
1773 pd->magic = DDF_PHYS_RECORDS_MAGIC;
1774 pd->used_pdes = __cpu_to_be16(0);
1775 pd->max_pdes = __cpu_to_be16(max_phys_disks);
1776 memset(pd->pad, 0xff, 52);
1777
1778 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
1779 fprintf(stderr, Name ": %s could not allocate vd\n", __func__);
1780 return 0;
1781 }
1782 ddf->virt = vd;
1783 ddf->vdsize = vdsize;
1784 memset(vd, 0, vdsize);
1785 vd->magic = DDF_VIRT_RECORDS_MAGIC;
1786 vd->populated_vdes = __cpu_to_be16(0);
1787 vd->max_vdes = __cpu_to_be16(max_virt_disks);
1788 memset(vd->pad, 0xff, 52);
1789
1790 for (i=0; i<max_virt_disks; i++)
1791 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
1792
1793 st->sb = ddf;
1794 ddf->updates_pending = 1;
1795 return 1;
1796 }
1797
1798 static int chunk_to_shift(int chunksize)
1799 {
1800 return ffs(chunksize/512)-1;
1801 }
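/* Sketch (editorial addition): chunk_shift encodes the chunk size as a
 * power-of-two number of 512-byte sectors (0 == 512 bytes, 1 == 1024, ...),
 * so the two directions are
 *
 *	int shift = chunk_to_shift(65536);
 *	int chunk_size = 512 << shift;
 *
 * here shift == 7 and chunk_size == 65536 again, matching the
 * '512 << vc->conf.chunk_shift' use in getinfo_super_ddf_bvd() above.
 */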
1802
1803 static int level_to_prl(int level)
1804 {
1805 switch (level) {
1806 case LEVEL_LINEAR: return DDF_CONCAT;
1807 case 0: return DDF_RAID0;
1808 case 1: return DDF_RAID1;
1809 case 4: return DDF_RAID4;
1810 case 5: return DDF_RAID5;
1811 case 6: return DDF_RAID6;
1812 default: return -1;
1813 }
1814 }
1815 static int layout_to_rlq(int level, int layout, int raiddisks)
1816 {
1817 switch(level) {
1818 case 0:
1819 return DDF_RAID0_SIMPLE;
1820 case 1:
1821 switch(raiddisks) {
1822 case 2: return DDF_RAID1_SIMPLE;
1823 case 3: return DDF_RAID1_MULTI;
1824 default: return -1;
1825 }
1826 case 4:
1827 switch(layout) {
1828 case 0: return DDF_RAID4_N;
1829 }
1830 break;
1831 case 5:
1832 switch(layout) {
1833 case ALGORITHM_LEFT_ASYMMETRIC:
1834 return DDF_RAID5_N_RESTART;
1835 case ALGORITHM_RIGHT_ASYMMETRIC:
1836 return DDF_RAID5_0_RESTART;
1837 case ALGORITHM_LEFT_SYMMETRIC:
1838 return DDF_RAID5_N_CONTINUE;
1839 case ALGORITHM_RIGHT_SYMMETRIC:
1840 return -1; /* not mentioned in standard */
1841 }
1842 case 6:
1843 switch(layout) {
1844 case ALGORITHM_ROTATING_N_RESTART:
1845 return DDF_RAID5_N_RESTART;
1846 case ALGORITHM_ROTATING_ZERO_RESTART:
1847 return DDF_RAID6_0_RESTART;
1848 case ALGORITHM_ROTATING_N_CONTINUE:
1849 return DDF_RAID5_N_CONTINUE;
1850 }
1851 }
1852 return -1;
1853 }
1854
1855 static int rlq_to_layout(int rlq, int prl, int raiddisks)
1856 {
1857 switch(prl) {
1858 case DDF_RAID0:
1859 return 0; /* hopefully rlq == DDF_RAID0_SIMPLE */
1860 case DDF_RAID1:
1861 return 0; /* hopefully rlq == SIMPLE or MULTI depending
1862 on raiddisks*/
1863 case DDF_RAID4:
1864 switch(rlq) {
1865 case DDF_RAID4_N:
1866 return 0;
1867 default:
1868 /* not supported */
1869 return -1; /* FIXME this isn't checked */
1870 }
1871 case DDF_RAID5:
1872 switch(rlq) {
1873 case DDF_RAID5_N_RESTART:
1874 return ALGORITHM_LEFT_ASYMMETRIC;
1875 case DDF_RAID5_0_RESTART:
1876 return ALGORITHM_RIGHT_ASYMMETRIC;
1877 case DDF_RAID5_N_CONTINUE:
1878 return ALGORITHM_LEFT_SYMMETRIC;
1879 default:
1880 return -1;
1881 }
1882 case DDF_RAID6:
1883 switch(rlq) {
1884 case DDF_RAID5_N_RESTART:
1885 return ALGORITHM_ROTATING_N_RESTART;
1886 case DDF_RAID6_0_RESTART:
1887 return ALGORITHM_ROTATING_ZERO_RESTART;
1888 case DDF_RAID5_N_CONTINUE:
1889 return ALGORITHM_ROTATING_N_CONTINUE;
1890 default:
1891 return -1;
1892 }
1893 }
1894 return -1;
1895 }
1896
1897 #ifndef MDASSEMBLE
1898 struct extent {
1899 unsigned long long start, size;
1900 };
1901 static int cmp_extent(const void *av, const void *bv)
1902 {
1903 const struct extent *a = av;
1904 const struct extent *b = bv;
1905 if (a->start < b->start)
1906 return -1;
1907 if (a->start > b->start)
1908 return 1;
1909 return 0;
1910 }
1911
1912 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
1913 {
1914 /* Find a list of used extents on the given physical device
1915 * (dl) of the given ddf.
1916 * Return a malloced array of 'struct extent'.
1917 *
1918 * FIXME ignore DDF_Legacy devices?
1919 *
1920 */
1921 struct extent *rv;
1922 int n = 0;
1923 unsigned int i, j;
1924
1925 rv = malloc(sizeof(struct extent) * (ddf->max_part + 2));
1926 if (!rv)
1927 return NULL;
1928
1929 for (i = 0; i < ddf->max_part; i++) {
1930 struct vcl *v = dl->vlist[i];
1931 if (v == NULL)
1932 continue;
1933 for (j = 0; j < v->conf.prim_elmnt_count; j++)
1934 if (v->conf.phys_refnum[j] == dl->disk.refnum) {
1935 /* This device plays role 'j' in 'v'. */
1936 rv[n].start = __be64_to_cpu(v->lba_offset[j]);
1937 rv[n].size = __be64_to_cpu(v->conf.blocks);
1938 n++;
1939 break;
1940 }
1941 }
1942 qsort(rv, n, sizeof(*rv), cmp_extent);
1943
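/* Terminate the list with a sentinel: size == 0 marks the end, and its
 * 'start' (the device's config_size) lets callers measure the free space
 * remaining before the reserved metadata area. */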
1944 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
1945 rv[n].size = 0;
1946 return rv;
1947 }
1948 #endif
1949
1950 static int init_super_ddf_bvd(struct supertype *st,
1951 mdu_array_info_t *info,
1952 unsigned long long size,
1953 char *name, char *homehost,
1954 int *uuid)
1955 {
1956 /* We are creating a BVD inside a pre-existing container.
1957 * so st->sb is already set.
1958 * We need to create a new vd_config and a new virtual_entry
1959 */
1960 struct ddf_super *ddf = st->sb;
1961 unsigned int venum;
1962 struct virtual_entry *ve;
1963 struct vcl *vcl;
1964 struct vd_config *vc;
1965
1966 if (__be16_to_cpu(ddf->virt->populated_vdes)
1967 >= __be16_to_cpu(ddf->virt->max_vdes)) {
1968 fprintf(stderr, Name": This ddf already has the "
1969 "maximum of %d virtual devices\n",
1970 __be16_to_cpu(ddf->virt->max_vdes));
1971 return 0;
1972 }
1973
1974 if (name)
1975 for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
1976 if (!all_ff(ddf->virt->entries[venum].guid)) {
1977 char *n = ddf->virt->entries[venum].name;
1978
1979 if (strncmp(name, n, 16) == 0) {
1980 fprintf(stderr, Name ": This ddf already"
1981 " has an array called %s\n",
1982 name);
1983 return 0;
1984 }
1985 }
1986
1987 for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
1988 if (all_ff(ddf->virt->entries[venum].guid))
1989 break;
1990 if (venum == __be16_to_cpu(ddf->virt->max_vdes)) {
1991 fprintf(stderr, Name ": Cannot find spare slot for "
1992 "virtual disk - DDF is corrupt\n");
1993 return 0;
1994 }
1995 ve = &ddf->virt->entries[venum];
1996
1997 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
1998 * timestamp, random number
1999 */
2000 make_header_guid(ve->guid);
2001 ve->unit = __cpu_to_be16(info->md_minor);
2002 ve->pad0 = 0xFFFF;
2003 ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
2004 ve->type = 0;
2005 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2006 if (info->state & 1) /* clean */
2007 ve->init_state = DDF_init_full;
2008 else
2009 ve->init_state = DDF_init_not;
2010
2011 memset(ve->pad1, 0xff, 14);
2012 memset(ve->name, ' ', 16);
2013 if (name)
2014 strncpy(ve->name, name, 16);
2015 ddf->virt->populated_vdes =
2016 __cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);
2017
2018 /* Now create a new vd_config */
2019 if (posix_memalign((void**)&vcl, 512,
2020 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2021 fprintf(stderr, Name ": %s could not allocate vd_config\n", __func__);
2022 return 0;
2023 }
2024 vcl->lba_offset = (__u64*) &vcl->conf.phys_refnum[ddf->mppe];
2025 vcl->vcnum = venum;
2026 sprintf(st->subarray, "%d", venum);
2027 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2028
2029 vc = &vcl->conf;
2030
2031 vc->magic = DDF_VD_CONF_MAGIC;
2032 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2033 vc->timestamp = __cpu_to_be32(time(0)-DECADE);
2034 vc->seqnum = __cpu_to_be32(1);
2035 memset(vc->pad0, 0xff, 24);
2036 vc->prim_elmnt_count = __cpu_to_be16(info->raid_disks);
2037 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2038 vc->prl = level_to_prl(info->level);
2039 vc->rlq = layout_to_rlq(info->level, info->layout, info->raid_disks);
2040 vc->sec_elmnt_count = 1;
2041 vc->sec_elmnt_seq = 0;
2042 vc->srl = 0;
2043 vc->blocks = __cpu_to_be64(info->size * 2);
2044 vc->array_blocks = __cpu_to_be64(
2045 calc_array_size(info->level, info->raid_disks, info->layout,
2046 info->chunk_size, info->size*2));
2047 memset(vc->pad1, 0xff, 8);
2048 vc->spare_refs[0] = 0xffffffff;
2049 vc->spare_refs[1] = 0xffffffff;
2050 vc->spare_refs[2] = 0xffffffff;
2051 vc->spare_refs[3] = 0xffffffff;
2052 vc->spare_refs[4] = 0xffffffff;
2053 vc->spare_refs[5] = 0xffffffff;
2054 vc->spare_refs[6] = 0xffffffff;
2055 vc->spare_refs[7] = 0xffffffff;
2056 memset(vc->cache_pol, 0, 8);
2057 vc->bg_rate = 0x80;
2058 memset(vc->pad2, 0xff, 3);
2059 memset(vc->pad3, 0xff, 52);
2060 memset(vc->pad4, 0xff, 192);
2061 memset(vc->v0, 0xff, 32);
2062 memset(vc->v1, 0xff, 32);
2063 memset(vc->v2, 0xff, 16);
2064 memset(vc->v3, 0xff, 16);
2065 memset(vc->vendor, 0xff, 32);
2066
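/* The phys_refnum array (4 bytes per element) is immediately followed in the
 * config record by the lba_offset array (8 bytes per element) - see the
 * lba_offset pointer set up above. Mark all refnums unused (0xff) and zero
 * the offsets. */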
2067 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2068 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2069
2070 vcl->next = ddf->conflist;
2071 ddf->conflist = vcl;
2072 ddf->currentconf = vcl;
2073 ddf->updates_pending = 1;
2074 return 1;
2075 }
2076
2077 #ifndef MDASSEMBLE
2078 static void add_to_super_ddf_bvd(struct supertype *st,
2079 mdu_disk_info_t *dk, int fd, char *devname)
2080 {
2081 /* fd and devname identify a device within the ddf container (st).
2082 * dk identifies a location in the new BVD.
2083 * We need to find suitable free space in that device and update
2084 * the phys_refnum and lba_offset for the newly created vd_config.
2085 * We might also want to update the type in the phys_disk
2086 * section.
2087 *
2088 * Alternately: fd == -1 and we have already chosen which device to
2089 * use, recording it in dl->raiddisk.
2090 */
2091 struct dl *dl;
2092 struct ddf_super *ddf = st->sb;
2093 struct vd_config *vc;
2094 __u64 *lba_offset;
2095 unsigned int working;
2096 unsigned int i;
2097 unsigned long long blocks, pos, esize;
2098 struct extent *ex;
2099
2100 if (fd == -1) {
2101 for (dl = ddf->dlist; dl ; dl = dl->next)
2102 if (dl->raiddisk == dk->raid_disk)
2103 break;
2104 } else {
2105 for (dl = ddf->dlist; dl ; dl = dl->next)
2106 if (dl->major == dk->major &&
2107 dl->minor == dk->minor)
2108 break;
2109 }
2110 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2111 return;
2112
2113 vc = &ddf->currentconf->conf;
2114 lba_offset = ddf->currentconf->lba_offset;
2115
2116 ex = get_extents(ddf, dl);
2117 if (!ex)
2118 return;
2119
2120 i = 0; pos = 0;
2121 blocks = __be64_to_cpu(vc->blocks);
2122 if (ddf->currentconf->block_sizes)
2123 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2124
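/* First-fit search: walk the sorted extent list and stop at the first gap of
 * at least 'blocks' sectors; 'pos' tracks the end of the previous used extent. */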
2125 do {
2126 esize = ex[i].start - pos;
2127 if (esize >= blocks)
2128 break;
2129 pos = ex[i].start + ex[i].size;
2130 i++;
2131 } while (ex[i-1].size);
2132
2133 free(ex);
2134 if (esize < blocks)
2135 return;
2136
2137 ddf->currentdev = dk->raid_disk;
2138 vc->phys_refnum[dk->raid_disk] = dl->disk.refnum;
2139 lba_offset[dk->raid_disk] = __cpu_to_be64(pos);
2140
2141 for (i = 0; i < ddf->max_part ; i++)
2142 if (dl->vlist[i] == NULL)
2143 break;
2144 if (i == ddf->max_part)
2145 return;
2146 dl->vlist[i] = ddf->currentconf;
2147
2148 if (fd >= 0)
2149 dl->fd = fd;
2150 if (devname)
2151 dl->devname = devname;
2152
2153 /* Check how many working raid_disks, and if we can mark
2154 * array as optimal yet
2155 */
2156 working = 0;
2157
2158 for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++)
2159 if (vc->phys_refnum[i] != 0xffffffff)
2160 working++;
2161
2162 /* Find which virtual_entry */
2163 i = ddf->currentconf->vcnum;
2164 if (working == __be16_to_cpu(vc->prim_elmnt_count))
2165 ddf->virt->entries[i].state =
2166 (ddf->virt->entries[i].state & ~DDF_state_mask)
2167 | DDF_state_optimal;
2168
2169 if (vc->prl == DDF_RAID6 &&
2170 working+1 == __be16_to_cpu(vc->prim_elmnt_count))
2171 ddf->virt->entries[i].state =
2172 (ddf->virt->entries[i].state & ~DDF_state_mask)
2173 | DDF_state_part_optimal;
2174
2175 ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
2176 ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
2177 ddf->updates_pending = 1;
2178 }
2179
2180 /* add a device to a container, either while creating it or while
2181 * expanding a pre-existing container
2182 */
2183 static int add_to_super_ddf(struct supertype *st,
2184 mdu_disk_info_t *dk, int fd, char *devname)
2185 {
2186 struct ddf_super *ddf = st->sb;
2187 struct dl *dd;
2188 time_t now;
2189 struct tm *tm;
2190 unsigned long long size;
2191 struct phys_disk_entry *pde;
2192 unsigned int n, i;
2193 struct stat stb;
2194
2195 if (ddf->currentconf) {
2196 add_to_super_ddf_bvd(st, dk, fd, devname);
2197 return 0;
2198 }
2199
2200 /* This is device numbered dk->number. We need to create
2201 * a phys_disk entry and a more detailed disk_data entry.
2202 */
2203 fstat(fd, &stb);
2204 if (posix_memalign((void**)&dd, 512,
2205 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2206 fprintf(stderr, Name
2207 ": %s could allocate buffer for new disk, aborting\n",
2208 __func__);
2209 return 1;
2210 }
2211 dd->major = major(stb.st_rdev);
2212 dd->minor = minor(stb.st_rdev);
2213 dd->devname = devname;
2214 dd->fd = fd;
2215 dd->spare = NULL;
2216
2217 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2218 now = time(0);
2219 tm = localtime(&now);
2220 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2221 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2222 *(__u32*)(dd->disk.guid + 16) = random32();
2223 *(__u32*)(dd->disk.guid + 20) = random32();
2224
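/* Pick a random reference number and retry until it does not collide with
 * the refnum of any existing physical-disk entry. */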
2225 do {
2226 /* Cannot be bothered finding a CRC of some irrelevant details */
2227 dd->disk.refnum = random32();
2228 for (i = __be16_to_cpu(ddf->active->max_pd_entries);
2229 i > 0; i--)
2230 if (ddf->phys->entries[i-1].refnum == dd->disk.refnum)
2231 break;
2232 } while (i > 0);
2233
2234 dd->disk.forced_ref = 1;
2235 dd->disk.forced_guid = 1;
2236 memset(dd->disk.vendor, ' ', 32);
2237 memcpy(dd->disk.vendor, "Linux", 5);
2238 memset(dd->disk.pad, 0xff, 442);
2239 for (i = 0; i < ddf->max_part ; i++)
2240 dd->vlist[i] = NULL;
2241
2242 n = __be16_to_cpu(ddf->phys->used_pdes);
2243 pde = &ddf->phys->entries[n];
2244 dd->pdnum = n;
2245
2246 if (st->update_tail) {
2247 int len = (sizeof(struct phys_disk) +
2248 sizeof(struct phys_disk_entry));
2249 struct phys_disk *pd;
2250
2251 pd = malloc(len);
2252 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2253 pd->used_pdes = __cpu_to_be16(n);
2254 pde = &pd->entries[0];
2255 dd->mdupdate = pd;
2256 } else {
2257 n++;
2258 ddf->phys->used_pdes = __cpu_to_be16(n);
2259 }
2260
2261 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2262 pde->refnum = dd->disk.refnum;
2263 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2264 pde->state = __cpu_to_be16(DDF_Online);
2265 get_dev_size(fd, NULL, &size);
2266 /* We are required to reserve 32Meg, and record the size in sectors */
2267 pde->config_size = __cpu_to_be64( (size - 32*1024*1024) / 512);
2268 sprintf(pde->path, "%17.17s", "Information: nil");
2269 memset(pde->pad, 0xff, 6);
2270
2271 dd->size = size >> 9;
2272 if (st->update_tail) {
2273 dd->next = ddf->add_list;
2274 ddf->add_list = dd;
2275 } else {
2276 dd->next = ddf->dlist;
2277 ddf->dlist = dd;
2278 ddf->updates_pending = 1;
2279 }
2280
2281 return 0;
2282 }
2283
2284 /*
2285 * This is the write_init_super method for a ddf container. It is
2286 * called when creating a container or adding another device to a
2287 * container.
2288 */
2289
2290 static unsigned char null_conf[4096+512];
2291
2292 static int __write_init_super_ddf(struct supertype *st, int do_close)
2293 {
2294
2295 struct ddf_super *ddf = st->sb;
2296 int i;
2297 struct dl *d;
2298 int n_config;
2299 int conf_size;
2300 int attempts = 0;
2301 int successes = 0;
2302 unsigned long long size, sector;
2303
2304 /* try to write updated metadata,
2305 * if we catch a failure move on to the next disk
2306 */
2307 for (d = ddf->dlist; d; d=d->next) {
2308 int fd = d->fd;
2309
2310 if (fd < 0)
2311 continue;
2312
2313 attempts++;
2314 /* We need to fill in the primary, (secondary) and workspace
2315 * lba's in the headers, set their checksums,
2316 * Also checksum phys, virt....
2317 *
2318 * Then write everything out, finally the anchor is written.
2319 */
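/* Resulting layout at the tail of each device (sector offsets from the end):
 * the reserved workspace begins 32MiB from the end, the primary header
 * (followed by the controller, phys-disk, virtual-disk, config and disk-data
 * records) begins 16MiB from the end, and the anchor occupies the very last
 * sector - consistent with the 32MiB reserved by avail_size_ddf(). */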
2320 get_dev_size(fd, NULL, &size);
2321 size /= 512;
2322 ddf->anchor.workspace_lba = __cpu_to_be64(size - 32*1024*2);
2323 ddf->anchor.primary_lba = __cpu_to_be64(size - 16*1024*2);
2324 ddf->anchor.seq = __cpu_to_be32(1);
2325 memcpy(&ddf->primary, &ddf->anchor, 512);
2326 memcpy(&ddf->secondary, &ddf->anchor, 512);
2327
2328 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2329 ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
2330 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2331
2332 ddf->primary.openflag = 0;
2333 ddf->primary.type = DDF_HEADER_PRIMARY;
2334
2335 ddf->secondary.openflag = 0;
2336 ddf->secondary.type = DDF_HEADER_SECONDARY;
2337
2338 ddf->primary.crc = calc_crc(&ddf->primary, 512);
2339 ddf->secondary.crc = calc_crc(&ddf->secondary, 512);
2340
2341 sector = size - 16*1024*2;
2342 lseek64(fd, sector<<9, 0);
2343 if (write(fd, &ddf->primary, 512) < 0)
2344 continue;
2345
2346 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2347 if (write(fd, &ddf->controller, 512) < 0)
2348 continue;
2349
2350 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2351
2352 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2353 continue;
2354
2355 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2356 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2357 continue;
2358
2359 /* Now write lots of config records. */
2360 n_config = ddf->max_part;
2361 conf_size = ddf->conf_rec_len * 512;
2362 for (i = 0 ; i <= n_config ; i++) {
2363 struct vcl *c = d->vlist[i];
2364 if (i == n_config)
2365 c = (struct vcl*)d->spare;
2366
2367 if (c) {
2368 c->conf.crc = calc_crc(&c->conf, conf_size);
2369 if (write(fd, &c->conf, conf_size) < 0)
2370 break;
2371 } else {
2372 char *null_aligned = (char*)((((unsigned long)null_conf)+511)&~511UL);
2373 if (null_conf[0] != 0xff)
2374 memset(null_conf, 0xff, sizeof(null_conf));
2375 unsigned int togo = conf_size;
2376 while (togo > sizeof(null_conf)-512) {
2377 if (write(fd, null_aligned, sizeof(null_conf)-512) < 0)
2378 break;
2379 togo -= sizeof(null_conf)-512;
2380 }
2381 if (write(fd, null_aligned, togo) < 0)
2382 break;
2383 }
2384 }
2385 if (i <= n_config)
2386 continue;
2387 d->disk.crc = calc_crc(&d->disk, 512);
2388 if (write(fd, &d->disk, 512) < 0)
2389 continue;
2390
2391 /* Maybe do the same for secondary */
2392
2393 lseek64(fd, (size-1)*512, SEEK_SET);
2394 if (write(fd, &ddf->anchor, 512) < 0)
2395 continue;
2396 successes++;
2397 }
2398
2399 if (do_close)
2400 for (d = ddf->dlist; d; d=d->next) {
2401 close(d->fd);
2402 d->fd = -1;
2403 }
2404
2405 return attempts != successes;
2406 }
2407
2408 static int write_init_super_ddf(struct supertype *st)
2409 {
2410 struct ddf_super *ddf = st->sb;
2411 struct vcl *currentconf = ddf->currentconf;
2412
2413 /* we are done with currentconf; reset it so st refers to the container again */
2414 ddf->currentconf = NULL;
2415
2416 if (st->update_tail) {
2417 /* queue the virtual_disk and vd_config as metadata updates */
2418 struct virtual_disk *vd;
2419 struct vd_config *vc;
2420 int len;
2421
2422 if (!currentconf) {
2423 int len = (sizeof(struct phys_disk) +
2424 sizeof(struct phys_disk_entry));
2425
2426 /* adding a disk to the container. */
2427 if (!ddf->add_list)
2428 return 0;
2429
2430 append_metadata_update(st, ddf->add_list->mdupdate, len);
2431 ddf->add_list->mdupdate = NULL;
2432 return 0;
2433 }
2434
2435 /* Newly created VD */
2436
2437 /* First the virtual disk. We have a slightly fake header */
2438 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2439 vd = malloc(len);
2440 *vd = *ddf->virt;
2441 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
2442 vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
2443 append_metadata_update(st, vd, len);
2444
2445 /* Then the vd_config */
2446 len = ddf->conf_rec_len * 512;
2447 vc = malloc(len);
2448 memcpy(vc, &currentconf->conf, len);
2449 append_metadata_update(st, vc, len);
2450
2451 /* FIXME I need to close the fds! */
2452 return 0;
2453 } else {
2454 struct dl *d;
2455 for (d = ddf->dlist; d; d=d->next)
2456 while (Kill(d->devname, NULL, 0, 1, 1) == 0);
2457 return __write_init_super_ddf(st, 1);
2458 }
2459 }
2460
2461 #endif
2462
2463 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize)
2464 {
2465 /* We must reserve the last 32Meg */
2466 if (devsize <= 32*1024*2)
2467 return 0;
2468 return devsize - 32*1024*2;
2469 }
2470
2471 #ifndef MDASSEMBLE
2472
2473 static int reserve_space(struct supertype *st, int raiddisks,
2474 unsigned long long size, int chunk,
2475 unsigned long long *freesize)
2476 {
2477 /* Find 'raiddisks' spare extents at least 'size' big (but
2478 * only caring about multiples of 'chunk') and remember
2479 * them.
2480 * If they cannot be found, fail.
2481 */
2482 struct dl *dl;
2483 struct ddf_super *ddf = st->sb;
2484 int cnt = 0;
2485
2486 for (dl = ddf->dlist; dl ; dl=dl->next) {
2487 dl->raiddisk = -1;
2488 dl->esize = 0;
2489 }
2490 /* Now find largest extent on each device */
2491 for (dl = ddf->dlist ; dl ; dl=dl->next) {
2492 struct extent *e = get_extents(ddf, dl);
2493 unsigned long long pos = 0;
2494 int i = 0;
2495 int found = 0;
2496 unsigned long long minsize = size;
2497
2498 if (size == 0)
2499 minsize = chunk;
2500
2501 if (!e)
2502 continue;
2503 do {
2504 unsigned long long esize;
2505 esize = e[i].start - pos;
2506 if (esize >= minsize) {
2507 found = 1;
2508 minsize = esize;
2509 }
2510 pos = e[i].start + e[i].size;
2511 i++;
2512 } while (e[i-1].size);
2513 if (found) {
2514 cnt++;
2515 dl->esize = minsize;
2516 }
2517 free(e);
2518 }
2519 if (cnt < raiddisks) {
2520 fprintf(stderr, Name ": not enough devices with space to create array.\n");
2521 return 0; /* Not enough free extents that are large enough */
2522 }
2523 if (size == 0) {
2524 /* choose the largest size for which at least 'raiddisks' devices have room */
2525 for (dl = ddf->dlist ; dl ; dl=dl->next) {
2526 struct dl *dl2;
2527 if (dl->esize <= size)
2528 continue;
2529 /* This is bigger than 'size', see if there are enough */
2530 cnt = 0;
2531 for (dl2 = dl; dl2 ; dl2=dl2->next)
2532 if (dl2->esize >= dl->esize)
2533 cnt++;
2534 if (cnt >= raiddisks)
2535 size = dl->esize;
2536 }
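/* Round the chosen size down to a whole number of chunks. */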
2537 if (chunk) {
2538 size = size / chunk;
2539 size *= chunk;
2540 }
2541 *freesize = size;
2542 if (size < 32) {
2543 fprintf(stderr, Name ": not enough spare devices to create array.\n");
2544 return 0;
2545 }
2546 }
2547 /* We have a 'size' for which enough devices have free space.
2548 * We simply do a first-fit. */
2549 cnt = 0;
2550 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
2551 if (dl->esize < size)
2552 continue;
2553
2554 dl->raiddisk = cnt;
2555 cnt++;
2556 }
2557 return 1;
2558 }
2559
2560
2561
2562 static int
2563 validate_geometry_ddf_container(struct supertype *st,
2564 int level, int layout, int raiddisks,
2565 int chunk, unsigned long long size,
2566 char *dev, unsigned long long *freesize,
2567 int verbose);
2568
2569 static int validate_geometry_ddf_bvd(struct supertype *st,
2570 int level, int layout, int raiddisks,
2571 int chunk, unsigned long long size,
2572 char *dev, unsigned long long *freesize,
2573 int verbose);
2574
2575 static int validate_geometry_ddf(struct supertype *st,
2576 int level, int layout, int raiddisks,
2577 int chunk, unsigned long long size,
2578 char *dev, unsigned long long *freesize,
2579 int verbose)
2580 {
2581 int fd;
2582 struct mdinfo *sra;
2583 int cfd;
2584
2585 /* ddf potentially supports lots of things, but it depends on
2586 * what devices are offered (and maybe kernel version?)
2587 * If given unused devices, we will make a container.
2588 * If given devices in a container, we will make a BVD.
2589 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
2590 */
2591
2592 if (level == LEVEL_CONTAINER) {
2593 /* Must be a fresh device to add to a container */
2594 return validate_geometry_ddf_container(st, level, layout,
2595 raiddisks, chunk,
2596 size, dev, freesize,
2597 verbose);
2598 }
2599
2600 if (!dev) {
2601 /* Initial sanity check. Exclude illegal levels. */
2602 int i;
2603 for (i=0; ddf_level_num[i].num1 != MAXINT; i++)
2604 if (ddf_level_num[i].num2 == level)
2605 break;
2606 if (ddf_level_num[i].num1 == MAXINT) {
2607 if (verbose)
2608 fprintf(stderr, Name ": DDF does not support level %d arrays\n",
2609 level);
2610 return 0;
2611 }
2612 /* Should check layout? etc */
2613
2614 if (st->sb && freesize) {
2615 /* --create was given a container to create in.
2616 * So we need to check that there are enough
2617 * free spaces and return the amount of space.
2618 * We may as well remember which drives were
2619 * chosen so that add_to_super/getinfo_super
2620 * can return them.
2621 */
2622 return reserve_space(st, raiddisks, size, chunk, freesize);
2623 }
2624 return 1;
2625 }
2626
2627 if (st->sb) {
2628 /* A container has already been opened, so we are
2629 * creating in there. Maybe a BVD, maybe an SVD.
2630 * Should make a distinction one day.
2631 */
2632 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
2633 chunk, size, dev, freesize,
2634 verbose);
2635 }
2636 /* This is the first device for the array.
2637 * If it is a container, we read it in and do automagic allocations,
2638 * no other devices should be given.
2639 * Otherwise it must be a member device of a container, and we
2640 * do manual allocation.
2641 * Later we should check for a BVD and make an SVD.
2642 */
2643 fd = open(dev, O_RDONLY|O_EXCL, 0);
2644 if (fd >= 0) {
2645 sra = sysfs_read(fd, 0, GET_VERSION);
2646 close(fd);
2647 if (sra && sra->array.major_version == -1 &&
2648 strcmp(sra->text_version, "ddf") == 0) {
2649
2650 /* load super */
2651 /* find space for 'n' devices. */
2652 /* remember the devices */
2653 /* Somehow return the fact that we have enough */
2654 }
2655
2656 if (verbose)
2657 fprintf(stderr,
2658 Name ": ddf: Cannot create this array "
2659 "on device %s - a container is required.\n",
2660 dev);
2661 return 0;
2662 }
2663 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
2664 if (verbose)
2665 fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
2666 dev, strerror(errno));
2667 return 0;
2668 }
2669 /* Well, it is in use by someone, maybe a 'ddf' container. */
2670 cfd = open_container(fd);
2671 if (cfd < 0) {
2672 close(fd);
2673 if (verbose)
2674 fprintf(stderr, Name ": ddf: Cannot use %s: %s\n",
2675 dev, strerror(EBUSY));
2676 return 0;
2677 }
2678 sra = sysfs_read(cfd, 0, GET_VERSION);
2679 close(fd);
2680 if (sra && sra->array.major_version == -1 &&
2681 strcmp(sra->text_version, "ddf") == 0) {
2682 /* This is a member of a ddf container. Load the container
2683 * and try to create a bvd
2684 */
2685 struct ddf_super *ddf;
2686 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL, 1) == 0) {
2687 st->sb = ddf;
2688 st->container_dev = fd2devnum(cfd);
2689 close(cfd);
2690 return validate_geometry_ddf_bvd(st, level, layout,
2691 raiddisks, chunk, size,
2692 dev, freesize,
2693 verbose);
2694 }
2695 close(cfd);
2696 } else /* device may belong to a different container */
2697 return 0;
2698
2699 return 1;
2700 }
2701
2702 static int
2703 validate_geometry_ddf_container(struct supertype *st,
2704 int level, int layout, int raiddisks,
2705 int chunk, unsigned long long size,
2706 char *dev, unsigned long long *freesize,
2707 int verbose)
2708 {
2709 int fd;
2710 unsigned long long ldsize;
2711
2712 if (level != LEVEL_CONTAINER)
2713 return 0;
2714 if (!dev)
2715 return 1;
2716
2717 fd = open(dev, O_RDONLY|O_EXCL, 0);
2718 if (fd < 0) {
2719 if (verbose)
2720 fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
2721 dev, strerror(errno));
2722 return 0;
2723 }
2724 if (!get_dev_size(fd, dev, &ldsize)) {
2725 close(fd);
2726 return 0;
2727 }
2728 close(fd);
2729
2730 *freesize = avail_size_ddf(st, ldsize >> 9);
2731 if (*freesize == 0)
2732 return 0;
2733
2734 return 1;
2735 }
2736
2737 static int validate_geometry_ddf_bvd(struct supertype *st,
2738 int level, int layout, int raiddisks,
2739 int chunk, unsigned long long size,
2740 char *dev, unsigned long long *freesize,
2741 int verbose)
2742 {
2743 struct stat stb;
2744 struct ddf_super *ddf = st->sb;
2745 struct dl *dl;
2746 unsigned long long pos = 0;
2747 unsigned long long maxsize;
2748 struct extent *e;
2749 int i;
2750 /* ddf/bvd supports lots of things, but not containers */
2751 if (level == LEVEL_CONTAINER) {
2752 if (verbose)
2753 fprintf(stderr, Name ": DDF cannot create a container within a container\n");
2754 return 0;
2755 }
2756 /* We must have the container info already read in. */
2757 if (!ddf)
2758 return 0;
2759
2760 if (!dev) {
2761 /* General test: make sure there is space for
2762 * 'raiddisks' device extents of size 'size'.
2763 */
2764 unsigned long long minsize = size;
2765 int dcnt = 0;
2766 if (minsize == 0)
2767 minsize = 8;
2768 for (dl = ddf->dlist; dl ; dl = dl->next)
2769 {
2770 int found = 0;
2771 pos = 0;
2772
2773 i = 0;
2774 e = get_extents(ddf, dl);
2775 if (!e) continue;
2776 do {
2777 unsigned long long esize;
2778 esize = e[i].start - pos;
2779 if (esize >= minsize)
2780 found = 1;
2781 pos = e[i].start + e[i].size;
2782 i++;
2783 } while (e[i-1].size);
2784 if (found)
2785 dcnt++;
2786 free(e);
2787 }
2788 if (dcnt < raiddisks) {
2789 if (verbose)
2790 fprintf(stderr,
2791 Name ": ddf: Not enough devices with "
2792 "space for this array (%d < %d)\n",
2793 dcnt, raiddisks);
2794 return 0;
2795 }
2796 return 1;
2797 }
2798 /* This device must be a member of the set */
2799 if (stat(dev, &stb) < 0)
2800 return 0;
2801 if ((S_IFMT & stb.st_mode) != S_IFBLK)
2802 return 0;
2803 for (dl = ddf->dlist ; dl ; dl = dl->next) {
2804 if (dl->major == (int)major(stb.st_rdev) &&
2805 dl->minor == (int)minor(stb.st_rdev))
2806 break;
2807 }
2808 if (!dl) {
2809 if (verbose)
2810 fprintf(stderr, Name ": ddf: %s is not in the "
2811 "same DDF set\n",
2812 dev);
2813 return 0;
2814 }
2815 e = get_extents(ddf, dl);
2816 maxsize = 0;
2817 i = 0;
2818 if (e) do {
2819 unsigned long long esize;
2820 esize = e[i].start - pos;
2821 if (esize >= maxsize)
2822 maxsize = esize;
2823 pos = e[i].start + e[i].size;
2824 i++;
2825 } while (e[i-1].size);
2826 *freesize = maxsize;
2827 // FIXME here I am
2828
2829 return 1;
2830 }
2831
2832 static int load_super_ddf_all(struct supertype *st, int fd,
2833 void **sbp, char *devname, int keep_fd)
2834 {
2835 struct mdinfo *sra;
2836 struct ddf_super *super;
2837 struct mdinfo *sd, *best = NULL;
2838 int bestseq = 0;
2839 int seq;
2840 char nm[20];
2841 int dfd;
2842
2843 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
2844 if (!sra)
2845 return 1;
2846 if (sra->array.major_version != -1 ||
2847 sra->array.minor_version != -2 ||
2848 strcmp(sra->text_version, "ddf") != 0)
2849 return 1;
2850
2851 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
2852 return 1;
2853 memset(super, 0, sizeof(*super));
2854
2855 /* first, try each device, and choose the best ddf */
2856 for (sd = sra->devs ; sd ; sd = sd->next) {
2857 int rv;
2858 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2859 dfd = dev_open(nm, O_RDONLY);
2860 if (dfd < 0)
2861 return 2;
2862 rv = load_ddf_headers(dfd, super, NULL);
2863 close(dfd);
2864 if (rv == 0) {
2865 seq = __be32_to_cpu(super->active->seq);
2866 if (super->active->openflag)
2867 seq--;
2868 if (!best || seq > bestseq) {
2869 bestseq = seq;
2870 best = sd;
2871 }
2872 }
2873 }
2874 if (!best)
2875 return 1;
2876 /* OK, load this ddf */
2877 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
2878 dfd = dev_open(nm, O_RDONLY);
2879 if (dfd < 0)
2880 return 1;
2881 load_ddf_headers(dfd, super, NULL);
2882 load_ddf_global(dfd, super, NULL);
2883 close(dfd);
2884 /* Now we need the device-local bits */
2885 for (sd = sra->devs ; sd ; sd = sd->next) {
2886 int rv;
2887
2888 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2889 dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
2890 if (dfd < 0)
2891 return 2;
2892 rv = load_ddf_headers(dfd, super, NULL);
2893 if (rv == 0)
2894 rv = load_ddf_local(dfd, super, NULL, keep_fd);
2895 if (!keep_fd) close(dfd);
2896 if (rv)
2897 return 1;
2898 }
2899 if (st->subarray[0]) {
2900 unsigned long val;
2901 struct vcl *v;
2902 char *ep;
2903
2904 val = strtoul(st->subarray, &ep, 10);
2905 if (*ep != '\0') {
2906 free(super);
2907 return 1;
2908 }
2909
2910 for (v = super->conflist; v; v = v->next)
2911 if (v->vcnum == val)
2912 super->currentconf = v;
2913 if (!super->currentconf) {
2914 free(super);
2915 return 1;
2916 }
2917 }
2918
2919 *sbp = super;
2920 if (st->ss == NULL) {
2921 st->ss = &super_ddf;
2922 st->minor_version = 0;
2923 st->max_devs = 512;
2924 st->container_dev = fd2devnum(fd);
2925 }
2926 st->loaded_container = 1;
2927 return 0;
2928 }
2929 #endif /* MDASSEMBLE */
2930
2931 static struct mdinfo *container_content_ddf(struct supertype *st)
2932 {
2933 /* Given a container loaded by load_super_ddf_all,
2934 * extract information about all the arrays into
2935 * an mdinfo tree.
2936 *
2937 * For each vcl in conflist: create an mdinfo, fill it in,
2938 * then look for matching devices (phys_refnum) in dlist
2939 * and create appropriate device mdinfo.
2940 */
2941 struct ddf_super *ddf = st->sb;
2942 struct mdinfo *rest = NULL;
2943 struct vcl *vc;
2944
2945 for (vc = ddf->conflist ; vc ; vc=vc->next)
2946 {
2947 unsigned int i;
2948 unsigned int j;
2949 struct mdinfo *this;
2950 this = malloc(sizeof(*this));
2951 memset(this, 0, sizeof(*this));
2952 this->next = rest;
2953 rest = this;
2954
2955 this->array.level = map_num1(ddf_level_num, vc->conf.prl);
2956 this->array.raid_disks =
2957 __be16_to_cpu(vc->conf.prim_elmnt_count);
2958 this->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
2959 this->array.raid_disks);
2960 this->array.md_minor = -1;
2961 this->array.major_version = -1;
2962 this->array.minor_version = -2;
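/* Bytes 16-19 of the VD GUID carry the (1980-based) creation timestamp
 * placed there by make_header_guid(), so adding DECADE recovers a Unix
 * ctime; utime comes from the config record's own timestamp. */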
2963 this->array.ctime = DECADE +
2964 __be32_to_cpu(*(__u32*)(vc->conf.guid+16));
2965 this->array.utime = DECADE +
2966 __be32_to_cpu(vc->conf.timestamp);
2967 this->array.chunk_size = 512 << vc->conf.chunk_shift;
2968
2969 i = vc->vcnum;
2970 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
2971 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
2972 DDF_init_full) {
2973 this->array.state = 0;
2974 this->resync_start = 0;
2975 } else {
2976 this->array.state = 1;
2977 this->resync_start = MaxSector;
2978 }
2979 memcpy(this->name, ddf->virt->entries[i].name, 16);
2980 this->name[16]=0;
2981 for(j=0; j<16; j++)
2982 if (this->name[j] == ' ')
2983 this->name[j] = 0;
2984
2985 memset(this->uuid, 0, sizeof(this->uuid));
2986 this->component_size = __be64_to_cpu(vc->conf.blocks);
2987 this->array.size = this->component_size / 2;
2988 this->container_member = i;
2989
2990 ddf->currentconf = vc;
2991 uuid_from_super_ddf(st, this->uuid);
2992 ddf->currentconf = NULL;
2993
2994 sprintf(this->text_version, "/%s/%d",
2995 devnum2devname(st->container_dev),
2996 this->container_member);
2997
2998 for (i = 0 ; i < ddf->mppe ; i++) {
2999 struct mdinfo *dev;
3000 struct dl *d;
3001 int stt;
3002
3003 if (vc->conf.phys_refnum[i] == 0xFFFFFFFF)
3004 continue;
3005
3006 for (d = ddf->dlist; d ; d=d->next)
3007 if (d->disk.refnum == vc->conf.phys_refnum[i])
3008 break;
3009 if (d == NULL)
3010 /* Haven't found that one yet, maybe there are others */
3011 continue;
3012 stt = __be16_to_cpu(ddf->phys->entries[d->pdnum].state);
3013 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3014 != DDF_Online)
3015 continue;
3016
3017 this->array.working_disks++;
3018
3019 dev = malloc(sizeof(*dev));
3020 memset(dev, 0, sizeof(*dev));
3021 dev->next = this->devs;
3022 this->devs = dev;
3023
3024 dev->disk.number = __be32_to_cpu(d->disk.refnum);
3025 dev->disk.major = d->major;
3026 dev->disk.minor = d->minor;
3027 dev->disk.raid_disk = i;
3028 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3029 dev->recovery_start = MaxSector;
3030
3031 dev->events = __be32_to_cpu(ddf->primary.seq);
3032 dev->data_offset = __be64_to_cpu(vc->lba_offset[i]);
3033 dev->component_size = __be64_to_cpu(vc->conf.blocks);
3034 if (d->devname)
3035 strcpy(dev->name, d->devname);
3036 }
3037 }
3038 return rest;
3039 }
3040
3041 static int store_super_ddf(struct supertype *st, int fd)
3042 {
3043 struct ddf_super *ddf = st->sb;
3044 unsigned long long dsize;
3045 void *buf;
3046 int rc;
3047
3048 if (!ddf)
3049 return 1;
3050
3051 /* ->dlist and ->conflist will be set for updates, currently not
3052 * supported
3053 */
3054 if (ddf->dlist || ddf->conflist)
3055 return 1;
3056
3057 if (!get_dev_size(fd, NULL, &dsize))
3058 return 1;
3059
3060 if (posix_memalign(&buf, 512, 512) != 0)
3061 return 1;
3062 memset(buf, 0, 512);
3063
3064 lseek64(fd, dsize-512, 0);
3065 rc = write(fd, buf, 512);
3066 free(buf);
3067 if (rc < 0)
3068 return 1;
3069 return 0;
3070 }
3071
3072 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3073 {
3074 /*
3075 * return:
3076 * 0 same, or first was empty, and second was copied
3077 * 1 second had wrong number
3078 * 2 wrong uuid
3079 * 3 wrong other info
3080 */
3081 struct ddf_super *first = st->sb;
3082 struct ddf_super *second = tst->sb;
3083
3084 if (!first) {
3085 st->sb = tst->sb;
3086 tst->sb = NULL;
3087 return 0;
3088 }
3089
3090 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3091 return 2;
3092
3093 /* FIXME should I look at anything else? */
3094 return 0;
3095 }
3096
3097 #ifndef MDASSEMBLE
3098 /*
3099 * A new array 'a' has been started which claims to be instance 'inst'
3100 * within container 'c'.
3101 * We need to confirm that the array matches the metadata in 'c' so
3102 * that we don't corrupt any metadata.
3103 */
3104 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3105 {
3106 dprintf("ddf: open_new %s\n", inst);
3107 a->info.container_member = atoi(inst);
3108 return 0;
3109 }
3110
3111 /*
3112 * The array 'a' is to be marked clean in the metadata.
3113 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
3114 * clean up to that point (in sectors). If that cannot be recorded in the
3115 * metadata, then leave it as dirty.
3116 *
3117 * For DDF, we need to clear the DDF_state_inconsistent bit in the
3118 * !global! virtual_disk.virtual_entry structure.
3119 */
3120 static int ddf_set_array_state(struct active_array *a, int consistent)
3121 {
3122 struct ddf_super *ddf = a->container->sb;
3123 int inst = a->info.container_member;
3124 int old = ddf->virt->entries[inst].state;
3125 if (consistent == 2) {
3126 /* Should check if a recovery should be started FIXME */
3127 consistent = 1;
3128 if (!is_resync_complete(&a->info))
3129 consistent = 0;
3130 }
3131 if (consistent)
3132 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
3133 else
3134 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
3135 if (old != ddf->virt->entries[inst].state)
3136 ddf->updates_pending = 1;
3137
3138 old = ddf->virt->entries[inst].init_state;
3139 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
3140 if (is_resync_complete(&a->info))
3141 ddf->virt->entries[inst].init_state |= DDF_init_full;
3142 else if (a->info.resync_start == 0)
3143 ddf->virt->entries[inst].init_state |= DDF_init_not;
3144 else
3145 ddf->virt->entries[inst].init_state |= DDF_init_quick;
3146 if (old != ddf->virt->entries[inst].init_state)
3147 ddf->updates_pending = 1;
3148
3149 dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
3150 a->info.resync_start);
3151 return consistent;
3152 }
3153
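/* container_of: recover the enclosing structure from a pointer to one of its
 * members (the same idiom as the Linux kernel macro); used below to get from
 * a vd_config back to its struct vcl. */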
3154 #define container_of(ptr, type, member) ({ \
3155 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
3156 (type *)( (char *)__mptr - offsetof(type,member) );})
3157 /*
3158 * The state of each disk is stored in the global phys_disk structure
3159 * in phys_disk.entries[n].state.
3160 * This makes various combinations awkward.
3161 * - When a device fails in any array, it must be failed in all arrays
3162 * that include a part of this device.
3163 * - When a component is rebuilding, we cannot include it officially in the
3164 * array unless this is the only array that uses the device.
3165 *
3166 * So: when transitioning:
3167 * Online -> failed, just set failed flag. monitor will propagate
3168 * spare -> online, the device might need to be added to the array.
3169 * spare -> failed, just set failed. Don't worry if in array or not.
3170 */
3171 static void ddf_set_disk(struct active_array *a, int n, int state)
3172 {
3173 struct ddf_super *ddf = a->container->sb;
3174 unsigned int inst = a->info.container_member;
3175 struct vd_config *vc = find_vdcr(ddf, inst);
3176 int pd, i, st, working;
3177 struct mdinfo *mdi;
3178 struct dl *dl;
3179 
3180 if (vc == NULL) {
3181 dprintf("ddf: cannot find instance %d!!\n", inst);
3182 return;
3183 }
3184 pd = find_phys(ddf, vc->phys_refnum[n]);
3185 /* Find the matching slot in 'info'. */
3186 for (mdi = a->info.devs; mdi; mdi = mdi->next)
3187 if (mdi->disk.raid_disk == n)
3188 break;
3189 if (!mdi)
3190 return;
3191
3192 /* and find the 'dl' entry corresponding to that. */
3193 for (dl = ddf->dlist; dl; dl = dl->next)
3194 if (mdi->state_fd >= 0 &&
3195 mdi->disk.major == dl->major &&
3196 mdi->disk.minor == dl->minor)
3197 break;
3198 if (!dl)
3199 return;
3200
3201 if (pd < 0 || pd != dl->pdnum) {
3202 /* disk doesn't currently exist or has changed.
3203 * If it is now in_sync, insert it. */
3204 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
3205 struct vcl *vcl;
3206 pd = dl->pdnum;
3207 vc->phys_refnum[n] = dl->disk.refnum;
3208 vcl = container_of(vc, struct vcl, conf);
3209 vcl->lba_offset[n] = mdi->data_offset;
3210 ddf->phys->entries[pd].type &=
3211 ~__cpu_to_be16(DDF_Global_Spare);
3212 ddf->phys->entries[pd].type |=
3213 __cpu_to_be16(DDF_Active_in_VD);
3214 ddf->updates_pending = 1;
3215 }
3216 } else {
3217 int old = ddf->phys->entries[pd].state;
3218 if (state & DS_FAULTY)
3219 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
3220 if (state & DS_INSYNC) {
3221 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
3222 ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
3223 }
3224 if (old != ddf->phys->entries[pd].state)
3225 ddf->updates_pending = 1;
3226 }
3227
3228 dprintf("ddf: set_disk %d to %x\n", n, state);
3229
3230 /* Now we need to check the state of the array and update
3231 * virtual_disk.entries[n].state.
3232 * It needs to be one of "optimal", "degraded", "failed".
3233 * I don't understand 'deleted' or 'missing'.
3234 */
3235 working = 0;
3236 for (i=0; i < a->info.array.raid_disks; i++) {
3237 pd = find_phys(ddf, vc->phys_refnum[i]);
3238 if (pd < 0)
3239 continue;
3240 st = __be16_to_cpu(ddf->phys->entries[pd].state);
3241 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3242 == DDF_Online)
3243 working++;
3244 }
3245 state = DDF_state_degraded;
3246 if (working == a->info.array.raid_disks)
3247 state = DDF_state_optimal;
3248 else switch(vc->prl) {
3249 case DDF_RAID0:
3250 case DDF_CONCAT:
3251 case DDF_JBOD:
3252 state = DDF_state_failed;
3253 break;
3254 case DDF_RAID1:
3255 if (working == 0)
3256 state = DDF_state_failed;
3257 break;
3258 case DDF_RAID4:
3259 case DDF_RAID5:
3260 if (working < a->info.array.raid_disks-1)
3261 state = DDF_state_failed;
3262 break;
3263 case DDF_RAID6:
3264 if (working < a->info.array.raid_disks-2)
3265 state = DDF_state_failed;
3266 else if (working == a->info.array.raid_disks-1)
3267 state = DDF_state_part_optimal;
3268 break;
3269 }
3270
3271 if (ddf->virt->entries[inst].state !=
3272 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
3273 | state)) {
3274
3275 ddf->virt->entries[inst].state =
3276 (ddf->virt->entries[inst].state & ~DDF_state_mask)
3277 | state;
3278 ddf->updates_pending = 1;
3279 }
3280
3281 }
3282
3283 static void ddf_sync_metadata(struct supertype *st)
3284 {
3285
3286 /*
3287 * Write all data to all devices.
3288 * Later, we might be able to track whether only local changes
3289 * have been made, or whether any global data has been changed,
3290 * but ddf is sufficiently weird that it probably always
3291 * changes global data ....
3292 */
3293 struct ddf_super *ddf = st->sb;
3294 if (!ddf->updates_pending)
3295 return;
3296 ddf->updates_pending = 0;
3297 __write_init_super_ddf(st, 0);
3298 dprintf("ddf: sync_metadata\n");
3299 }
3300
3301 static void ddf_process_update(struct supertype *st,
3302 struct metadata_update *update)
3303 {
3304 /* Apply this update to the metadata.
3305 * The first 4 bytes are a DDF_*_MAGIC which guides
3306 * our actions.
3307 * Possible updates are:
3308 * DDF_PHYS_RECORDS_MAGIC
3309 * Add a new physical device. Changes to this record
3310 * only happen implicitly.
3311 * used_pdes is the device number.
3312 * DDF_VIRT_RECORDS_MAGIC
3313 * Add a new VD. Possibly also change the 'access' bits.
3314 * populated_vdes is the entry number.
3315 * DDF_VD_CONF_MAGIC
3316 * New or updated VD. The VIRT_RECORD must already
3317 * exist. For an update, phys_refnum and lba_offset
3318 * (at least) are updated, and the VD_CONF must
3319 * be written to precisely those devices listed with
3320 * a phys_refnum.
3321 * DDF_SPARE_ASSIGN_MAGIC
3322 * replacement Spare Assignment Record... but for which device?
3323 *
3324 * So, e.g.:
3325 * - to create a new array, we send a VIRT_RECORD and
3326 * a VD_CONF. Then assemble and start the array.
3327 * - to activate a spare we send a VD_CONF to add the phys_refnum
3328 * and offset. This will also mark the spare as active with
3329 * a spare-assignment record.
3330 */
3331 struct ddf_super *ddf = st->sb;
3332 __u32 *magic = (__u32*)update->buf;
3333 struct phys_disk *pd;
3334 struct virtual_disk *vd;
3335 struct vd_config *vc;
3336 struct vcl *vcl;
3337 struct dl *dl;
3338 unsigned int mppe;
3339 unsigned int ent;
3340 unsigned int pdnum, pd2;
3341
3342 dprintf("Process update %x\n", *magic);
3343
3344 switch (*magic) {
3345 case DDF_PHYS_RECORDS_MAGIC:
3346
3347 if (update->len != (sizeof(struct phys_disk) +
3348 sizeof(struct phys_disk_entry)))
3349 return;
3350 pd = (struct phys_disk*)update->buf;
3351
3352 ent = __be16_to_cpu(pd->used_pdes);
3353 if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
3354 return;
3355 if (!all_ff(ddf->phys->entries[ent].guid))
3356 return;
3357 ddf->phys->entries[ent] = pd->entries[0];
3358 ddf->phys->used_pdes = __cpu_to_be16(1 +
3359 __be16_to_cpu(ddf->phys->used_pdes));
3360 ddf->updates_pending = 1;
3361 if (ddf->add_list) {
3362 struct active_array *a;
3363 struct dl *al = ddf->add_list;
3364 ddf->add_list = al->next;
3365
3366 al->next = ddf->dlist;
3367 ddf->dlist = al;
3368
3369 /* As a device has been added, we should check
3370 * for any degraded devices that might make
3371 * use of this spare */
3372 for (a = st->arrays ; a; a=a->next)
3373 a->check_degraded = 1;
3374 }
3375 break;
3376
3377 case DDF_VIRT_RECORDS_MAGIC:
3378
3379 if (update->len != (sizeof(struct virtual_disk) +
3380 sizeof(struct virtual_entry)))
3381 return;
3382 vd = (struct virtual_disk*)update->buf;
3383
3384 ent = __be16_to_cpu(vd->populated_vdes);
3385 if (ent >= __be16_to_cpu(ddf->virt->max_vdes))
3386 return;
3387 if (!all_ff(ddf->virt->entries[ent].guid))
3388 return;
3389 ddf->virt->entries[ent] = vd->entries[0];
3390 ddf->virt->populated_vdes = __cpu_to_be16(1 +
3391 __be16_to_cpu(ddf->virt->populated_vdes));
3392 ddf->updates_pending = 1;
3393 break;
3394
3395 case DDF_VD_CONF_MAGIC:
3396 dprintf("len %d %d\n", update->len, ddf->conf_rec_len);
3397
3398 mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
3399 if ((unsigned)update->len != ddf->conf_rec_len * 512)
3400 return;
3401 vc = (struct vd_config*)update->buf;
3402 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
3403 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
3404 break;
3405 dprintf("vcl = %p\n", vcl);
3406 if (vcl) {
3407 /* An update, just copy the phys_refnum and lba_offset
3408 * fields
3409 */
3410 memcpy(vcl->conf.phys_refnum, vc->phys_refnum,
3411 mppe * (sizeof(__u32) + sizeof(__u64)));
3412 } else {
3413 /* A new VD_CONF */
3414 if (!update->space)
3415 return;
3416 vcl = update->space;
3417 update->space = NULL;
3418 vcl->next = ddf->conflist;
3419 memcpy(&vcl->conf, vc, update->len);
3420 vcl->lba_offset = (__u64*)
3421 &vcl->conf.phys_refnum[mppe];
3422 for (ent = 0;
3423 ent < __be16_to_cpu(ddf->virt->populated_vdes);
3424 ent++)
3425 if (memcmp(vc->guid, ddf->virt->entries[ent].guid,
3426 DDF_GUID_LEN) == 0) {
3427 vcl->vcnum = ent;
3428 break;
3429 }
3430 ddf->conflist = vcl;
3431 }
3432 /* Set DDF_Transition on all Failed devices - to help
3433 * us detect those that are no longer in use
3434 */
3435 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
3436 if (ddf->phys->entries[pdnum].state
3437 & __be16_to_cpu(DDF_Failed))
3438 ddf->phys->entries[pdnum].state
3439 |= __be16_to_cpu(DDF_Transition);
3440 /* Now make sure vlist is correct for each dl. */
3441 for (dl = ddf->dlist; dl; dl = dl->next) {
3442 unsigned int dn;
3443 unsigned int vn = 0;
3444 int in_degraded = 0;
3445 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
3446 for (dn=0; dn < ddf->mppe ; dn++)
3447 if (vcl->conf.phys_refnum[dn] ==
3448 dl->disk.refnum) {
3449 int vstate;
3450 dprintf("dev %d has %p at %d\n",
3451 dl->pdnum, vcl, vn);
3452 /* Clear the Transition flag */
3453 if (ddf->phys->entries[dl->pdnum].state
3454 & __be16_to_cpu(DDF_Failed))
3455 ddf->phys->entries[dl->pdnum].state &=
3456 ~__be16_to_cpu(DDF_Transition);
3457
3458 dl->vlist[vn++] = vcl;
3459 vstate = ddf->virt->entries[vcl->vcnum].state
3460 & DDF_state_mask;
3461 if (vstate == DDF_state_degraded ||
3462 vstate == DDF_state_part_optimal)
3463 in_degraded = 1;
3464 break;
3465 }
3466 while (vn < ddf->max_part)
3467 dl->vlist[vn++] = NULL;
3468 if (dl->vlist[0]) {
3469 ddf->phys->entries[dl->pdnum].type &=
3470 ~__cpu_to_be16(DDF_Global_Spare);
3471 if (!(ddf->phys->entries[dl->pdnum].type &
3472 __cpu_to_be16(DDF_Active_in_VD))) {
3473 ddf->phys->entries[dl->pdnum].type |=
3474 __cpu_to_be16(DDF_Active_in_VD);
3475 if (in_degraded)
3476 ddf->phys->entries[dl->pdnum].state |=
3477 __cpu_to_be16(DDF_Rebuilding);
3478 }
3479 }
3480 if (dl->spare) {
3481 ddf->phys->entries[dl->pdnum].type &=
3482 ~__cpu_to_be16(DDF_Global_Spare);
3483 ddf->phys->entries[dl->pdnum].type |=
3484 __cpu_to_be16(DDF_Spare);
3485 }
3486 if (!dl->vlist[0] && !dl->spare) {
3487 ddf->phys->entries[dl->pdnum].type |=
3488 __cpu_to_be16(DDF_Global_Spare);
3489 ddf->phys->entries[dl->pdnum].type &=
3490 ~__cpu_to_be16(DDF_Spare |
3491 DDF_Active_in_VD);
3492 }
3493 }
3494
3495 /* Now remove any 'Failed' devices that are not part
3496 * of any VD. They will have the Transition flag set.
3497 * Once done, we need to update all dl->pdnum numbers.
3498 */
3499 pd2 = 0;
3500 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
3501 if ((ddf->phys->entries[pdnum].state
3502 & __be16_to_cpu(DDF_Failed))
3503 && (ddf->phys->entries[pdnum].state
3504 & __be16_to_cpu(DDF_Transition)))
3505 /* skip this one */;
3506 else if (pdnum == pd2)
3507 pd2++;
3508 else {
3509 ddf->phys->entries[pd2] = ddf->phys->entries[pdnum];
3510 for (dl = ddf->dlist; dl; dl = dl->next)
3511 if (dl->pdnum == (int)pdnum)
3512 dl->pdnum = pd2;
3513 pd2++;
3514 }
3515 ddf->phys->used_pdes = __cpu_to_be16(pd2);
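/* Blank the GUIDs of the now-unused trailing entries so they read as
 * empty slots. */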
3516 while (pd2 < pdnum) {
3517 memset(ddf->phys->entries[pd2].guid, 0xff, DDF_GUID_LEN);
3518 pd2++;
3519 }
3520
3521 ddf->updates_pending = 1;
3522 break;
3523 case DDF_SPARE_ASSIGN_MAGIC:
3524 default: break;
3525 }
3526 }
3527
3528 static void ddf_prepare_update(struct supertype *st,
3529 struct metadata_update *update)
3530 {
3531 /* This update arrived at managemon.
3532 * We are about to pass it to monitor.
3533 * If a malloc is needed, do it here.
3534 */
3535 struct ddf_super *ddf = st->sb;
3536 __u32 *magic = (__u32*)update->buf;
3537 if (*magic == DDF_VD_CONF_MAGIC)
3538 if (posix_memalign(&update->space, 512,
3539 offsetof(struct vcl, conf)
3540 + ddf->conf_rec_len * 512) != 0)
3541 update->space = NULL;
3542 }
3543
3544 /*
3545 * Check if the array 'a' is degraded but not failed.
3546 * If it is, find as many spares as are available and needed and
3547 * arrange for their inclusion.
3548 * We only choose devices which are not already in the array,
3549 * and prefer those with a spare-assignment to this array.
3550 * otherwise we choose global spares - assuming always that
3551 * there is enough room.
3552 * For each spare that we assign, we return an 'mdinfo' which
3553 * describes the position for the device in the array.
3554 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
3555 * the new phys_refnum and lba_offset values.
3556 *
3557 * Only worry about BVDs at the moment.
3558 */
3559 static struct mdinfo *ddf_activate_spare(struct active_array *a,
3560 struct metadata_update **updates)
3561 {
3562 int working = 0;
3563 struct mdinfo *d;
3564 struct ddf_super *ddf = a->container->sb;
3565 int global_ok = 0;
3566 struct mdinfo *rv = NULL;
3567 struct mdinfo *di;
3568 struct metadata_update *mu;
3569 struct dl *dl;
3570 int i;
3571 struct vd_config *vc;
3572 __u64 *lba;
3573
3574 for (d = a->info.devs ; d ; d = d->next) {
3575 if ((d->curr_state & DS_FAULTY) &&
3576 d->state_fd >= 0)
3577 /* wait for Removal to happen */
3578 return NULL;
3579 if (d->state_fd >= 0)
3580 working ++;
3581 }
3582
3583 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
3584 a->info.array.level);
3585 if (working == a->info.array.raid_disks)
3586 return NULL; /* array not degraded */
3587 switch (a->info.array.level) {
3588 case 1:
3589 if (working == 0)
3590 return NULL; /* failed */
3591 break;
3592 case 4:
3593 case 5:
3594 if (working < a->info.array.raid_disks - 1)
3595 return NULL; /* failed */
3596 break;
3597 case 6:
3598 if (working < a->info.array.raid_disks - 2)
3599 return NULL; /* failed */
3600 break;
3601 default: /* concat or stripe */
3602 return NULL; /* failed */
3603 }
3604
3605 /* For each slot, if it is not working, find a spare */
3606 dl = ddf->dlist;
3607 for (i = 0; i < a->info.array.raid_disks; i++) {
3608 for (d = a->info.devs ; d ; d = d->next)
3609 if (d->disk.raid_disk == i)
3610 break;
3611 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
3612 if (d && (d->state_fd >= 0))
3613 continue;
3614
3615 /* OK, this device needs recovery. Find a spare */
3616 again:
3617 for ( ; dl ; dl = dl->next) {
3618 unsigned long long esize;
3619 unsigned long long pos;
3620 struct mdinfo *d2;
3621 int is_global = 0;
3622 int is_dedicated = 0;
3623 struct extent *ex;
3624 unsigned int j;
3625 /* If in this array, skip */
3626 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
3627 if (d2->state_fd >= 0 &&
3628 d2->disk.major == dl->major &&
3629 d2->disk.minor == dl->minor) {
3630 dprintf("%x:%x already in array\n", dl->major, dl->minor);
3631 break;
3632 }
3633 if (d2)
3634 continue;
3635 if (ddf->phys->entries[dl->pdnum].type &
3636 __cpu_to_be16(DDF_Spare)) {
3637 /* Check spare assign record */
3638 if (dl->spare) {
3639 if (dl->spare->type & DDF_spare_dedicated) {
3640 /* check spare_ents for guid */
3641 for (j = 0 ;
3642 j < __be16_to_cpu(dl->spare->populated);
3643 j++) {
3644 if (memcmp(dl->spare->spare_ents[j].guid,
3645 ddf->virt->entries[a->info.container_member].guid,
3646 DDF_GUID_LEN) == 0)
3647 is_dedicated = 1;
3648 }
3649 } else
3650 is_global = 1;
3651 }
3652 } else if (ddf->phys->entries[dl->pdnum].type &
3653 __cpu_to_be16(DDF_Global_Spare)) {
3654 is_global = 1;
3655 }
3656 if ( ! (is_dedicated ||
3657 (is_global && global_ok))) {
3658 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
3659 is_dedicated, is_global);
3660 continue;
3661 }
3662
3663 /* We are allowed to use this device - is there space?
3664 * We need a->info.component_size sectors */
3665 ex = get_extents(ddf, dl);
3666 if (!ex) {
3667 dprintf("cannot get extents\n");
3668 continue;
3669 }
3670 j = 0; pos = 0;
3671 esize = 0;
3672
3673 do {
3674 esize = ex[j].start - pos;
3675 if (esize >= a->info.component_size)
3676 break;
3677 pos = ex[j].start + ex[j].size;
3678 j++;
3679 } while (ex[j-1].size);
3680
3681 free(ex);
3682 if (esize < a->info.component_size) {
3683 dprintf("%x:%x has no room: %llu %llu\n",
3684 dl->major, dl->minor,
3685 esize, a->info.component_size);
3686 /* No room */
3687 continue;
3688 }
3689
3690 /* Cool, we have a device with some space at pos */
3691 di = malloc(sizeof(*di));
3692 if (!di)
3693 continue;
3694 memset(di, 0, sizeof(*di));
3695 di->disk.number = i;
3696 di->disk.raid_disk = i;
3697 di->disk.major = dl->major;
3698 di->disk.minor = dl->minor;
3699 di->disk.state = 0;
3700 di->recovery_start = 0;
3701 di->data_offset = pos;
3702 di->component_size = a->info.component_size;
3703 di->container_member = dl->pdnum;
3704 di->next = rv;
3705 rv = di;
3706 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
3707 i, pos);
3708
3709 break;
3710 }
3711 if (!dl && ! global_ok) {
3712 /* not enough dedicated spares, try global */
3713 global_ok = 1;
3714 dl = ddf->dlist;
3715 goto again;
3716 }
3717 }
3718
3719 if (!rv)
3720 /* No spares found */
3721 return rv;
3722 /* Now 'rv' has a list of devices to return.
3723 * Create a metadata_update record to update the
3724 * phys_refnum and lba_offset values
3725 */
3726 mu = malloc(sizeof(*mu));
3727 if (mu && posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
3728 free(mu);
3729 mu = NULL;
3730 }
3731 if (!mu) {
3732 while (rv) {
3733 struct mdinfo *n = rv->next;
3734
3735 free(rv);
3736 rv = n;
3737 }
3738 return NULL;
3739 }
3740
3741 mu->buf = malloc(ddf->conf_rec_len * 512);
3742 mu->len = ddf->conf_rec_len * 512;
3743 mu->space = NULL;
3744 mu->next = *updates;
3745 vc = find_vdcr(ddf, a->info.container_member);
3746 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
3747
3748 vc = (struct vd_config*)mu->buf;
3749 lba = (__u64*)&vc->phys_refnum[ddf->mppe];
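/* di->container_member was set above to the pdnum of the device chosen for
 * this slot, so use it to look up the matching refnum. */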
3750 for (di = rv ; di ; di = di->next) {
3751 vc->phys_refnum[di->disk.raid_disk] =
3752 ddf->phys->entries[di->container_member].refnum;
3753 lba[di->disk.raid_disk] = di->data_offset;
3754 }
3755 *updates = mu;
3756 return rv;
3757 }
3758 #endif /* MDASSEMBLE */
3759
3760 static int ddf_level_to_layout(int level)
3761 {
3762 switch(level) {
3763 case 0:
3764 case 1:
3765 return 0;
3766 case 5:
3767 return ALGORITHM_LEFT_SYMMETRIC;
3768 case 6:
3769 return ALGORITHM_ROTATING_N_CONTINUE;
3770 case 10:
3771 return 0x102;
3772 default:
3773 return UnSet;
3774 }
3775 }
3776
3777 struct superswitch super_ddf = {
3778 #ifndef MDASSEMBLE
3779 .examine_super = examine_super_ddf,
3780 .brief_examine_super = brief_examine_super_ddf,
3781 .brief_examine_subarrays = brief_examine_subarrays_ddf,
3782 .export_examine_super = export_examine_super_ddf,
3783 .detail_super = detail_super_ddf,
3784 .brief_detail_super = brief_detail_super_ddf,
3785 .validate_geometry = validate_geometry_ddf,
3786 .write_init_super = write_init_super_ddf,
3787 .add_to_super = add_to_super_ddf,
3788 #endif
3789 .match_home = match_home_ddf,
3790 .uuid_from_super= uuid_from_super_ddf,
3791 .getinfo_super = getinfo_super_ddf,
3792 .update_super = update_super_ddf,
3793
3794 .avail_size = avail_size_ddf,
3795
3796 .compare_super = compare_super_ddf,
3797
3798 .load_super = load_super_ddf,
3799 .init_super = init_super_ddf,
3800 .store_super = store_super_ddf,
3801 .free_super = free_super_ddf,
3802 .match_metadata_desc = match_metadata_desc_ddf,
3803 .container_content = container_content_ddf,
3804 .default_layout = ddf_level_to_layout,
3805
3806 .external = 1,
3807
3808 #ifndef MDASSEMBLE
3809 /* for mdmon */
3810 .open_new = ddf_open_new,
3811 .set_array_state= ddf_set_array_state,
3812 .set_disk = ddf_set_disk,
3813 .sync_metadata = ddf_sync_metadata,
3814 .process_update = ddf_process_update,
3815 .prepare_update = ddf_prepare_update,
3816 .activate_spare = ddf_activate_spare,
3817 #endif
3818 .name = "ddf",
3819 };