1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2007 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add the number of seconds
38 * in the decade of the seventies to convert to Linux (1970 based) timestamps:
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
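/* Rough usage sketch (matching how DECADE is applied later in this file):
 *	unix_time     = (time_t)__be32_to_cpu(ddf_timestamp) + DECADE;
 *	ddf_timestamp = __cpu_to_be32(time(0) - DECADE);
 */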
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 /* The DDF metadata handling.
48 * DDF metadata lives at the end of the device.
49 * The last 512 byte block provides an 'anchor' which is used to locate
50 * the rest of the metadata, which usually lives immediately before the anchor (i.e. closer to the start of the device).
51 *
52 * Note:
53 * - all multibyte numeric fields are bigendian.
54 * - all strings are space padded.
55 *
56 */
57
58 /* Primary Raid Level (PRL) */
59 #define DDF_RAID0 0x00
60 #define DDF_RAID1 0x01
61 #define DDF_RAID3 0x03
62 #define DDF_RAID4 0x04
63 #define DDF_RAID5 0x05
64 #define DDF_RAID1E 0x11
65 #define DDF_JBOD 0x0f
66 #define DDF_CONCAT 0x1f
67 #define DDF_RAID5E 0x15
68 #define DDF_RAID5EE 0x25
69 #define DDF_RAID6 0x06
70
71 /* Raid Level Qualifier (RLQ) */
72 #define DDF_RAID0_SIMPLE 0x00
73 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
74 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
75 #define DDF_RAID3_0 0x00 /* parity in first extent */
76 #define DDF_RAID3_N 0x01 /* parity in last extent */
77 #define DDF_RAID4_0 0x00 /* parity in first extent */
78 #define DDF_RAID4_N 0x01 /* parity in last extent */
79 /* these apply to raid5e and raid5ee as well */
80 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
81 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
82 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
83 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
84
85 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
86 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
87
88 /* Secondary RAID Level (SRL) */
89 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
90 #define DDF_2MIRRORED 0x01
91 #define DDF_2CONCAT 0x02
92 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
93
94 /* Magic numbers */
95 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
96 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
97 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
98 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
99 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
100 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
101 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
102 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
103 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
104 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
105
106 #define DDF_GUID_LEN 24
107 #define DDF_REVISION_0 "01.00.00"
108 #define DDF_REVISION_2 "01.02.00"
109
110 struct ddf_header {
111 __u32 magic; /* DDF_HEADER_MAGIC */
112 __u32 crc;
113 char guid[DDF_GUID_LEN];
114 char revision[8]; /* 01.02.00 */
115 __u32 seq; /* starts at '1' */
116 __u32 timestamp;
117 __u8 openflag;
118 __u8 foreignflag;
119 __u8 enforcegroups;
120 __u8 pad0; /* 0xff */
121 __u8 pad1[12]; /* 12 * 0xff */
122 /* 64 bytes so far */
123 __u8 header_ext[32]; /* reserved: fill with 0xff */
124 __u64 primary_lba;
125 __u64 secondary_lba;
126 __u8 type;
127 __u8 pad2[3]; /* 0xff */
128 __u32 workspace_len; /* sectors for vendor space -
129 * at least 32768(sectors) */
130 __u64 workspace_lba;
131 __u16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
132 __u16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
133 __u16 max_partitions; /* i.e. max num of configuration
134 record entries per disk */
135 __u16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
136 *12/512) */
137 __u16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
138 __u8 pad3[54]; /* 0xff */
139 /* 192 bytes so far */
140 __u32 controller_section_offset;
141 __u32 controller_section_length;
142 __u32 phys_section_offset;
143 __u32 phys_section_length;
144 __u32 virt_section_offset;
145 __u32 virt_section_length;
146 __u32 config_section_offset;
147 __u32 config_section_length;
148 __u32 data_section_offset;
149 __u32 data_section_length;
150 __u32 bbm_section_offset;
151 __u32 bbm_section_length;
152 __u32 diag_space_offset;
153 __u32 diag_space_length;
154 __u32 vendor_offset;
155 __u32 vendor_length;
156 /* 256 bytes so far */
157 __u8 pad4[256]; /* 0xff */
158 };
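/* Together with the trailing 256 byte pad this header is exactly one
 * 512 byte sector, which is why it is always read and written as a
 * single block elsewhere in this file.
 */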
159
160 /* type field */
161 #define DDF_HEADER_ANCHOR 0x00
162 #define DDF_HEADER_PRIMARY 0x01
163 #define DDF_HEADER_SECONDARY 0x02
164
165 /* The content of the 'controller section' - global scope */
166 struct ddf_controller_data {
167 __u32 magic; /* DDF_CONTROLLER_MAGIC */
168 __u32 crc;
169 char guid[DDF_GUID_LEN];
170 struct controller_type {
171 __u16 vendor_id;
172 __u16 device_id;
173 __u16 sub_vendor_id;
174 __u16 sub_device_id;
175 } type;
176 char product_id[16];
177 __u8 pad[8]; /* 0xff */
178 __u8 vendor_data[448];
179 };
180
181 /* The content of phys_section - global scope */
182 struct phys_disk {
183 __u32 magic; /* DDF_PHYS_RECORDS_MAGIC */
184 __u32 crc;
185 __u16 used_pdes;
186 __u16 max_pdes;
187 __u8 pad[52];
188 struct phys_disk_entry {
189 char guid[DDF_GUID_LEN];
190 __u32 refnum;
191 __u16 type;
192 __u16 state;
193 __u64 config_size; /* DDF structures must be after here */
194 char path[18]; /* another horrible structure really */
195 __u8 pad[6];
196 } entries[0];
197 };
198
199 /* phys_disk_entry.type is a bitmap - bigendian remember */
200 #define DDF_Forced_PD_GUID 1
201 #define DDF_Active_in_VD 2
202 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
203 #define DDF_Spare 8 /* overrides Global_spare */
204 #define DDF_Foreign 16
205 #define DDF_Legacy 32 /* no DDF on this device */
206
207 #define DDF_Interface_mask 0xf00
208 #define DDF_Interface_SCSI 0x100
209 #define DDF_Interface_SAS 0x200
210 #define DDF_Interface_SATA 0x300
211 #define DDF_Interface_FC 0x400
212
213 /* phys_disk_entry.state is a bigendian bitmap */
214 #define DDF_Online 1
215 #define DDF_Failed 2 /* overrides 1,4,8 */
216 #define DDF_Rebuilding 4
217 #define DDF_Transition 8
218 #define DDF_SMART 16
219 #define DDF_ReadErrors 32
220 #define DDF_Missing 64
221
222 /* The content of the virt_section global scope */
223 struct virtual_disk {
224 __u32 magic; /* DDF_VIRT_RECORDS_MAGIC */
225 __u32 crc;
226 __u16 populated_vdes;
227 __u16 max_vdes;
228 __u8 pad[52];
229 struct virtual_entry {
230 char guid[DDF_GUID_LEN];
231 __u16 unit;
232 __u16 pad0; /* 0xffff */
233 __u16 guid_crc;
234 __u16 type;
235 __u8 state;
236 __u8 init_state;
237 __u8 pad1[14];
238 char name[16];
239 } entries[0];
240 };
241
242 /* virtual_entry.type is a bitmap - bigendian */
243 #define DDF_Shared 1
244 #define DDF_Enforce_Groups 2
245 #define DDF_Unicode 4
246 #define DDF_Owner_Valid 8
247
248 /* virtual_entry.state is a bigendian bitmap */
249 #define DDF_state_mask 0x7
250 #define DDF_state_optimal 0x0
251 #define DDF_state_degraded 0x1
252 #define DDF_state_deleted 0x2
253 #define DDF_state_missing 0x3
254 #define DDF_state_failed 0x4
255 #define DDF_state_part_optimal 0x5
256
257 #define DDF_state_morphing 0x8
258 #define DDF_state_inconsistent 0x10
259
260 /* virtual_entry.init_state is a bigendian bitmap */
261 #define DDF_initstate_mask 0x03
262 #define DDF_init_not 0x00
263 #define DDF_init_quick 0x01 /* initialisation in progress.
264 * i.e. 'state_inconsistent' */
265 #define DDF_init_full 0x02
266
267 #define DDF_access_mask 0xc0
268 #define DDF_access_rw 0x00
269 #define DDF_access_ro 0x80
270 #define DDF_access_blocked 0xc0
271
272 /* The content of the config_section - local scope
273 * It has multiple records each config_record_len sectors
274 * They can be vd_config or spare_assign
275 */
276
277 struct vd_config {
278 __u32 magic; /* DDF_VD_CONF_MAGIC */
279 __u32 crc;
280 char guid[DDF_GUID_LEN];
281 __u32 timestamp;
282 __u32 seqnum;
283 __u8 pad0[24];
284 __u16 prim_elmnt_count;
285 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
286 __u8 prl;
287 __u8 rlq;
288 __u8 sec_elmnt_count;
289 __u8 sec_elmnt_seq;
290 __u8 srl;
291 __u64 blocks; /* blocks per component could be different
292 * on different component devices...(only
293 * for concat I hope) */
294 __u64 array_blocks; /* blocks in array */
295 __u8 pad1[8];
296 __u32 spare_refs[8];
297 __u8 cache_pol[8];
298 __u8 bg_rate;
299 __u8 pad2[3];
300 __u8 pad3[52];
301 __u8 pad4[192];
302 __u8 v0[32]; /* reserved- 0xff */
303 __u8 v1[32]; /* reserved- 0xff */
304 __u8 v2[16]; /* reserved- 0xff */
305 __u8 v3[16]; /* reserved- 0xff */
306 __u8 vendor[32];
307 __u32 phys_refnum[0]; /* refnum of each disk in sequence */
308 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
309 bvd are always the same size */
310 };
311
312 /* vd_config.cache_pol[7] is a bitmap */
313 #define DDF_cache_writeback 1 /* else writethrough */
314 #define DDF_cache_wadaptive 2 /* only applies if writeback */
315 #define DDF_cache_readahead 4
316 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
317 #define DDF_cache_ifnobatt 16 /* allow write caching even if battery is poor */
318 #define DDF_cache_wallowed 32 /* enable write caching */
319 #define DDF_cache_rallowed 64 /* enable read caching */
320
321 struct spare_assign {
322 __u32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
323 __u32 crc;
324 __u32 timestamp;
325 __u8 reserved[7];
326 __u8 type;
327 __u16 populated; /* SAEs used */
328 __u16 max; /* max SAEs */
329 __u8 pad[8];
330 struct spare_assign_entry {
331 char guid[DDF_GUID_LEN];
332 __u16 secondary_element;
333 __u8 pad[6];
334 } spare_ents[0];
335 };
336 /* spare_assign.type is a bitmap */
337 #define DDF_spare_dedicated 0x1 /* else global */
338 #define DDF_spare_revertible 0x2 /* else committable */
339 #define DDF_spare_active 0x4 /* else not active */
340 #define DDF_spare_affinity 0x8 /* enclosure affinity */
341
342 /* The data_section contents - local scope */
343 struct disk_data {
344 __u32 magic; /* DDF_PHYS_DATA_MAGIC */
345 __u32 crc;
346 char guid[DDF_GUID_LEN];
347 __u32 refnum; /* crc of some magic drive data ... */
348 __u8 forced_ref; /* set when above was not result of magic */
349 __u8 forced_guid; /* set if guid was forced rather than magic */
350 __u8 vendor[32];
351 __u8 pad[442];
352 };
353
354 /* bbm_section content */
355 struct bad_block_log {
356 __u32 magic;
357 __u32 crc;
358 __u16 entry_count;
359 __u32 spare_count;
360 __u8 pad[10];
361 __u64 first_spare;
362 struct mapped_block {
363 __u64 defective_start;
364 __u32 replacement_start;
365 __u16 remap_count;
366 __u8 pad[2];
367 } entries[0];
368 };
369
370 /* Struct for internally holding ddf structures */
371 /* The DDF structure stored on each device is potentially
372 * quite different, as some data is global and some is local.
373 * The global data is:
374 * - ddf header
375 * - controller_data
376 * - Physical disk records
377 * - Virtual disk records
378 * The local data is:
379 * - Configuration records
380 * - Physical Disk data section
381 * ( and Bad block and vendor which I don't care about yet).
382 *
383 * The local data is parsed into separate lists as it is read
384 * and reconstructed for writing. This means that we only need
385 * to make config changes once and they are automatically
386 * propagated to all devices.
387 * Note that the ddf_super has space for the conf and disk data
388 * for this disk and also for a list of all such data.
389 * The list is only used for the superblock that is being
390 * built in Create or Assemble to describe the whole array.
391 */
392 struct ddf_super {
393 struct ddf_header anchor, primary, secondary;
394 struct ddf_controller_data controller;
395 struct ddf_header *active;
396 struct phys_disk *phys;
397 struct virtual_disk *virt;
398 int pdsize, vdsize;
399 int max_part, mppe, conf_rec_len;
400 int currentdev;
401 int updates_pending;
402 struct vcl {
403 union {
404 char space[512];
405 struct {
406 struct vcl *next;
407 __u64 *lba_offset; /* location in 'conf' of
408 * the lba table */
409 int vcnum; /* index into ->virt */
410 __u64 *block_sizes; /* NULL if all the same */
411 };
412 };
413 struct vd_config conf;
414 } *conflist, *currentconf;
415 struct dl {
416 union {
417 char space[512];
418 struct {
419 struct dl *next;
420 int major, minor;
421 char *devname;
422 int fd;
423 unsigned long long size; /* sectors */
424 int pdnum; /* index in ->phys */
425 struct spare_assign *spare;
426 };
427 };
428 struct disk_data disk;
429 void *mdupdate; /* hold metadata update */
430 struct vcl *vlist[0]; /* max_part in size */
431 } *dlist, *add_list;
432 };
433
434 #ifndef offsetof
435 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
436 #endif
437
438
439 static int calc_crc(void *buf, int len)
440 {
441 /* crcs are always at the same place as in the ddf_header */
442 struct ddf_header *ddf = buf;
443 __u32 oldcrc = ddf->crc;
444 __u32 newcrc;
445 ddf->crc = 0xffffffff;
446
447 newcrc = crc32(0, buf, len);
448 ddf->crc = oldcrc;
449 return newcrc;
450 }
451
452 static int load_ddf_header(int fd, unsigned long long lba,
453 unsigned long long size,
454 int type,
455 struct ddf_header *hdr, struct ddf_header *anchor)
456 {
457 /* read a ddf header (primary or secondary) from fd/lba
458 * and check that it is consistent with anchor
459 * Need to check:
460 * magic, crc, guid, rev and the LBAs; the header_type; and that
461 * everything after header_type is the same
462 */
463 if (lba >= size-1)
464 return 0;
465
466 if (lseek64(fd, lba<<9, 0) < 0)
467 return 0;
468
469 if (read(fd, hdr, 512) != 512)
470 return 0;
471
472 if (hdr->magic != DDF_HEADER_MAGIC)
473 return 0;
474 if (calc_crc(hdr, 512) != hdr->crc)
475 return 0;
476 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
477 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
478 anchor->primary_lba != hdr->primary_lba ||
479 anchor->secondary_lba != hdr->secondary_lba ||
480 hdr->type != type ||
481 memcmp(anchor->pad2, hdr->pad2, 512 -
482 offsetof(struct ddf_header, pad2)) != 0)
483 return 0;
484
485 /* Looks good enough to me... */
486 return 1;
487 }
488
489 static void *load_section(int fd, struct ddf_super *super, void *buf,
490 __u32 offset_be, __u32 len_be, int check)
491 {
492 unsigned long long offset = __be32_to_cpu(offset_be);
493 unsigned long long len = __be32_to_cpu(len_be);
494 int dofree = (buf == NULL);
495
496 if (check)
497 if (len != 2 && len != 8 && len != 32
498 && len != 128 && len != 512)
499 return NULL;
500
501 if (len > 1024)
502 return NULL;
503 if (buf) {
504 /* All pre-allocated sections are a single block */
505 if (len != 1)
506 return NULL;
507 } else {
508 if (posix_memalign(&buf, 512, len<<9) != 0) buf = NULL;
509 }
510
511 if (!buf)
512 return NULL;
513
514 if (super->active->type == 1)
515 offset += __be64_to_cpu(super->active->primary_lba);
516 else
517 offset += __be64_to_cpu(super->active->secondary_lba);
518
519 if (lseek64(fd, offset<<9, 0) != (offset<<9)) {
520 if (dofree)
521 free(buf);
522 return NULL;
523 }
524 if (read(fd, buf, len<<9) != (len<<9)) {
525 if (dofree)
526 free(buf);
527 return NULL;
528 }
529 return buf;
530 }
531
532 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
533 {
534 unsigned long long dsize;
535
536 get_dev_size(fd, NULL, &dsize);
537
538 if (lseek64(fd, dsize-512, 0) < 0) {
539 if (devname)
540 fprintf(stderr,
541 Name": Cannot seek to anchor block on %s: %s\n",
542 devname, strerror(errno));
543 return 1;
544 }
545 if (read(fd, &super->anchor, 512) != 512) {
546 if (devname)
547 fprintf(stderr,
548 Name ": Cannot read anchor block on %s: %s\n",
549 devname, strerror(errno));
550 return 1;
551 }
552 if (super->anchor.magic != DDF_HEADER_MAGIC) {
553 if (devname)
554 fprintf(stderr, Name ": no DDF anchor found on %s\n",
555 devname);
556 return 2;
557 }
558 if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
559 if (devname)
560 fprintf(stderr, Name ": bad CRC on anchor on %s\n",
561 devname);
562 return 2;
563 }
564 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
565 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
566 if (devname)
567 fprintf(stderr, Name ": can only support super revision"
568 " %.8s and earlier, not %.8s on %s\n",
569 DDF_REVISION_2, super->anchor.revision,devname);
570 return 2;
571 }
572 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
573 dsize >> 9, 1,
574 &super->primary, &super->anchor) == 0) {
575 if (devname)
576 fprintf(stderr,
577 Name ": Failed to load primary DDF header "
578 "on %s\n", devname);
579 return 2;
580 }
581 super->active = &super->primary;
582 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
583 dsize >> 9, 2,
584 &super->secondary, &super->anchor)) {
585 if ((__be32_to_cpu(super->primary.seq)
586 < __be32_to_cpu(super->secondary.seq) &&
587 !super->secondary.openflag)
588 || (__be32_to_cpu(super->primary.seq)
589 == __be32_to_cpu(super->secondary.seq) &&
590 super->primary.openflag && !super->secondary.openflag)
591 )
592 super->active = &super->secondary;
593 }
594 return 0;
595 }
596
597 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
598 {
599 void *ok;
600 ok = load_section(fd, super, &super->controller,
601 super->active->controller_section_offset,
602 super->active->controller_section_length,
603 0);
604 super->phys = load_section(fd, super, NULL,
605 super->active->phys_section_offset,
606 super->active->phys_section_length,
607 1);
608 super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;
609
610 super->virt = load_section(fd, super, NULL,
611 super->active->virt_section_offset,
612 super->active->virt_section_length,
613 1);
614 super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
615 if (!ok ||
616 !super->phys ||
617 !super->virt) {
618 free(super->phys);
619 free(super->virt);
620 super->phys = NULL;
621 super->virt = NULL;
622 return 2;
623 }
624 super->conflist = NULL;
625 super->dlist = NULL;
626
627 super->max_part = __be16_to_cpu(super->active->max_partitions);
628 super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
629 super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
630 return 0;
631 }
632
633 static int load_ddf_local(int fd, struct ddf_super *super,
634 char *devname, int keep)
635 {
636 struct dl *dl;
637 struct stat stb;
638 char *conf;
639 int i, j;
640 int vnum;
641 int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
642 unsigned long long dsize;
643
644 /* First the local disk info */
645 posix_memalign((void**)&dl, 512,
646 sizeof(*dl) +
647 (super->max_part) * sizeof(dl->vlist[0]));
648
649 load_section(fd, super, &dl->disk,
650 super->active->data_section_offset,
651 super->active->data_section_length,
652 0);
653 dl->devname = devname ? strdup(devname) : NULL;
654
655 fstat(fd, &stb);
656 dl->major = major(stb.st_rdev);
657 dl->minor = minor(stb.st_rdev);
658 dl->next = super->dlist;
659 dl->fd = keep ? fd : -1;
660
661 dl->size = 0;
662 if (get_dev_size(fd, devname, &dsize))
663 dl->size = dsize >> 9;
664 dl->spare = NULL;
665 for (i=0 ; i < super->max_part ; i++)
666 dl->vlist[i] = NULL;
667 super->dlist = dl;
668 dl->pdnum = -1;
669 for (i=0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
670 if (memcmp(super->phys->entries[i].guid,
671 dl->disk.guid, DDF_GUID_LEN) == 0)
672 dl->pdnum = i;
673
674 /* Now the config list. */
675 /* 'conf' is an array of config entries, some of which are
676 * probably invalid. Those which are good need to be copied into
677 * the conflist
678 */
679
680 conf = load_section(fd, super, NULL,
681 super->active->config_section_offset,
682 super->active->config_section_length,
683 0);
684
685 vnum = 0;
686 for (i = 0;
687 i < __be32_to_cpu(super->active->config_section_length);
688 i += super->conf_rec_len) {
689 struct vd_config *vd =
690 (struct vd_config *)((char*)conf + i*512);
691 struct vcl *vcl;
692
693 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
694 if (dl->spare)
695 continue;
696 posix_memalign((void**)&dl->spare, 512,
697 super->conf_rec_len*512);
698 memcpy(dl->spare, vd, super->conf_rec_len*512);
699 continue;
700 }
701 if (vd->magic != DDF_VD_CONF_MAGIC)
702 continue;
703 for (vcl = super->conflist; vcl; vcl = vcl->next) {
704 if (memcmp(vcl->conf.guid,
705 vd->guid, DDF_GUID_LEN) == 0)
706 break;
707 }
708
709 if (vcl) {
710 dl->vlist[vnum++] = vcl;
711 if (__be32_to_cpu(vd->seqnum) <=
712 __be32_to_cpu(vcl->conf.seqnum))
713 continue;
714 } else {
715 posix_memalign((void**)&vcl, 512,
716 (super->conf_rec_len*512 +
717 offsetof(struct vcl, conf)));
718 vcl->next = super->conflist;
719 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
720 super->conflist = vcl;
721 dl->vlist[vnum++] = vcl;
722 }
723 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
724 vcl->lba_offset = (__u64*)
725 &vcl->conf.phys_refnum[super->mppe];
726
727 		for (j=0; j < max_virt_disks ; j++)
728 			if (memcmp(super->virt->entries[j].guid,
729 				   vcl->conf.guid, DDF_GUID_LEN)==0)
730 				break;
731 		if (j < max_virt_disks)
732 			vcl->vcnum = j;
733 }
734 free(conf);
735
736 return 0;
737 }
738
739 #ifndef MDASSEMBLE
740 static int load_super_ddf_all(struct supertype *st, int fd,
741 void **sbp, char *devname, int keep_fd);
742 #endif
743 static int load_super_ddf(struct supertype *st, int fd,
744 char *devname)
745 {
746 unsigned long long dsize;
747 struct ddf_super *super;
748 int rv;
749
750 #ifndef MDASSEMBLE
751 /* if 'fd' is a container, load metadata from all the devices */
752 if (load_super_ddf_all(st, fd, &st->sb, devname, 1) == 0)
753 return 0;
754 #endif
755 if (st->subarray[0])
756 return 1; /* FIXME Is this correct */
757
758 if (get_dev_size(fd, devname, &dsize) == 0)
759 return 1;
760
761 	/* 32M is a lower bound */
762 	if (dsize <= 32*1024*1024) {
763 		if (devname)
764 			fprintf(stderr,
765 				Name ": %s is too small for ddf: "
766 				"size is %llu sectors.\n",
767 				devname, dsize>>9);
768 		return 1;
769 	}
770 	if (dsize & 511) {
771 		if (devname)
772 			fprintf(stderr,
773 				Name ": %s is an odd size for ddf: "
774 				"size is %llu bytes.\n",
775 				devname, dsize);
776 		return 1;
777 	}
780
781 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
782 fprintf(stderr, Name ": malloc of %zu failed.\n",
783 sizeof(*super));
784 return 1;
785 }
786 memset(super, 0, sizeof(*super));
787
788 rv = load_ddf_headers(fd, super, devname);
789 if (rv) {
790 free(super);
791 return rv;
792 }
793
794 /* Have valid headers and have chosen the best. Let's read in the rest*/
795
796 rv = load_ddf_global(fd, super, devname);
797
798 if (rv) {
799 if (devname)
800 fprintf(stderr,
801 Name ": Failed to load all information "
802 "sections on %s\n", devname);
803 free(super);
804 return rv;
805 }
806
807 load_ddf_local(fd, super, devname, 0);
808
809 /* Should possibly check the sections .... */
810
811 st->sb = super;
812 if (st->ss == NULL) {
813 st->ss = &super_ddf;
814 st->minor_version = 0;
815 st->max_devs = 512;
816 }
817 return 0;
818
819 }
820
821 static void free_super_ddf(struct supertype *st)
822 {
823 struct ddf_super *ddf = st->sb;
824 if (ddf == NULL)
825 return;
826 free(ddf->phys);
827 free(ddf->virt);
828 while (ddf->conflist) {
829 struct vcl *v = ddf->conflist;
830 ddf->conflist = v->next;
831 if (v->block_sizes)
832 free(v->block_sizes);
833 free(v);
834 }
835 while (ddf->dlist) {
836 struct dl *d = ddf->dlist;
837 ddf->dlist = d->next;
838 if (d->fd >= 0)
839 close(d->fd);
840 if (d->spare)
841 free(d->spare);
842 free(d);
843 }
844 free(ddf);
845 st->sb = NULL;
846 }
847
848 static struct supertype *match_metadata_desc_ddf(char *arg)
849 {
850 /* 'ddf' only supports containers */
851 struct supertype *st;
852 if (strcmp(arg, "ddf") != 0 &&
853 strcmp(arg, "default") != 0
854 )
855 return NULL;
856
857 st = malloc(sizeof(*st));
858 memset(st, 0, sizeof(*st));
859 st->ss = &super_ddf;
860 st->max_devs = 512;
861 st->minor_version = 0;
862 st->sb = NULL;
863 return st;
864 }
865
866
867 #ifndef MDASSEMBLE
868
869 static mapping_t ddf_state[] = {
870 { "Optimal", 0},
871 { "Degraded", 1},
872 { "Deleted", 2},
873 { "Missing", 3},
874 { "Failed", 4},
875 { "Partially Optimal", 5},
876 { "-reserved-", 6},
877 { "-reserved-", 7},
878 { NULL, 0}
879 };
880
881 static mapping_t ddf_init_state[] = {
882 { "Not Initialised", 0},
883 { "QuickInit in Progress", 1},
884 { "Fully Initialised", 2},
885 { "*UNKNOWN*", 3},
886 { NULL, 0}
887 };
888 static mapping_t ddf_access[] = {
889 { "Read/Write", 0},
890 { "Reserved", 1},
891 { "Read Only", 2},
892 { "Blocked (no access)", 3},
893 { NULL ,0}
894 };
895
896 static mapping_t ddf_level[] = {
897 { "RAID0", DDF_RAID0},
898 { "RAID1", DDF_RAID1},
899 { "RAID3", DDF_RAID3},
900 { "RAID4", DDF_RAID4},
901 { "RAID5", DDF_RAID5},
902 { "RAID1E",DDF_RAID1E},
903 { "JBOD", DDF_JBOD},
904 { "CONCAT",DDF_CONCAT},
905 { "RAID5E",DDF_RAID5E},
906 { "RAID5EE",DDF_RAID5EE},
907 { "RAID6", DDF_RAID6},
908 { NULL, 0}
909 };
910 static mapping_t ddf_sec_level[] = {
911 { "Striped", DDF_2STRIPED},
912 { "Mirrored", DDF_2MIRRORED},
913 { "Concat", DDF_2CONCAT},
914 { "Spanned", DDF_2SPANNED},
915 { NULL, 0}
916 };
917 #endif
918
919 struct num_mapping {
920 int num1, num2;
921 };
922 static struct num_mapping ddf_level_num[] = {
923 { DDF_RAID0, 0 },
924 { DDF_RAID1, 1 },
925 { DDF_RAID3, LEVEL_UNSUPPORTED },
926 { DDF_RAID4, 4 },
927 { DDF_RAID5, 5 },
928 { DDF_RAID1E, LEVEL_UNSUPPORTED },
929 { DDF_JBOD, LEVEL_UNSUPPORTED },
930 { DDF_CONCAT, LEVEL_LINEAR },
931 { DDF_RAID5E, LEVEL_UNSUPPORTED },
932 { DDF_RAID5EE, LEVEL_UNSUPPORTED },
933 { DDF_RAID6, 6},
934 { MAXINT, MAXINT }
935 };
936
937 static int map_num1(struct num_mapping *map, int num)
938 {
939 int i;
940 for (i=0 ; map[i].num1 != MAXINT; i++)
941 if (map[i].num1 == num)
942 break;
943 return map[i].num2;
944 }
945
946 #ifndef MDASSEMBLE
947 static void print_guid(char *guid, int tstamp)
948 {
949 /* GUIDs are part (or all) ASCII and part binary.
950 * They tend to be space padded.
951 * We print the GUID in HEX, then in parentheses add
952 * any initial ASCII sequence, and a possible
953 * time stamp from bytes 16-19
954 */
955 int l = DDF_GUID_LEN;
956 int i;
957
958 for (i=0 ; i<DDF_GUID_LEN ; i++) {
959 if ((i&3)==0 && i != 0) printf(":");
960 printf("%02X", guid[i]&255);
961 }
962
963 printf(" (");
964 while (l && guid[l-1] == ' ')
965 l--;
966 for (i=0 ; i<l ; i++) {
967 if (guid[i] >= 0x20 && guid[i] < 0x7f)
968 fputc(guid[i], stdout);
969 else
970 break;
971 }
972 if (tstamp) {
973 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
974 char tbuf[100];
975 struct tm *tm;
976 tm = localtime(&then);
977 strftime(tbuf, 100, " %D %T",tm);
978 fputs(tbuf, stdout);
979 }
980 printf(")");
981 }
982
983 static void examine_vd(int n, struct ddf_super *sb, char *guid)
984 {
985 int crl = sb->conf_rec_len;
986 struct vcl *vcl;
987
988 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
989 struct vd_config *vc = &vcl->conf;
990
991 if (calc_crc(vc, crl*512) != vc->crc)
992 continue;
993 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
994 continue;
995
996 /* Ok, we know about this VD, let's give more details */
997 printf(" Raid Devices[%d] : %d\n", n,
998 __be16_to_cpu(vc->prim_elmnt_count));
999 printf(" Chunk Size[%d] : %d sectors\n", n,
1000 1 << vc->chunk_shift);
1001 printf(" Raid Level[%d] : %s\n", n,
1002 map_num(ddf_level, vc->prl)?:"-unknown-");
1003 if (vc->sec_elmnt_count != 1) {
1004 printf(" Secondary Position[%d] : %d of %d\n", n,
1005 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1006 printf(" Secondary Level[%d] : %s\n", n,
1007 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1008 }
1009 printf(" Device Size[%d] : %llu\n", n,
1010 __be64_to_cpu(vc->blocks)/2);
1011 printf(" Array Size[%d] : %llu\n", n,
1012 __be64_to_cpu(vc->array_blocks)/2);
1013 }
1014 }
1015
1016 static void examine_vds(struct ddf_super *sb)
1017 {
1018 int cnt = __be16_to_cpu(sb->virt->populated_vdes);
1019 int i;
1020 printf(" Virtual Disks : %d\n", cnt);
1021
1022 for (i=0; i<cnt; i++) {
1023 struct virtual_entry *ve = &sb->virt->entries[i];
1024 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1025 printf("\n");
1026 printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
1027 printf(" state[%d] : %s, %s%s\n", i,
1028 map_num(ddf_state, ve->state & 7),
1029 (ve->state & 8) ? "Morphing, ": "",
1030 (ve->state & 16)? "Not Consistent" : "Consistent");
1031 printf(" init state[%d] : %s\n", i,
1032 map_num(ddf_init_state, ve->init_state&3));
1033 printf(" access[%d] : %s\n", i,
1034 map_num(ddf_access, (ve->init_state>>6) & 3));
1035 printf(" Name[%d] : %.16s\n", i, ve->name);
1036 examine_vd(i, sb, ve->guid);
1037 }
1038 if (cnt) printf("\n");
1039 }
1040
1041 static void examine_pds(struct ddf_super *sb)
1042 {
1043 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1044 int i;
1045 struct dl *dl;
1046 printf(" Physical Disks : %d\n", cnt);
1047
1048 for (i=0 ; i<cnt ; i++) {
1049 struct phys_disk_entry *pd = &sb->phys->entries[i];
1050 int type = __be16_to_cpu(pd->type);
1051 int state = __be16_to_cpu(pd->state);
1052
1053 printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1054 printf("\n");
1055 printf(" ref[%d] : %08x\n", i,
1056 __be32_to_cpu(pd->refnum));
1057 printf(" mode[%d] : %s%s%s%s%s\n", i,
1058 (type&2) ? "active":"",
1059 (type&4) ? "Global Spare":"",
1060 (type&8) ? "spare" : "",
1061 (type&16)? ", foreign" : "",
1062 (type&32)? "pass-through" : "");
1063 printf(" state[%d] : %s%s%s%s%s%s%s\n", i,
1064 (state&1)? "Online": "Offline",
1065 (state&2)? ", Failed": "",
1066 (state&4)? ", Rebuilding": "",
1067 (state&8)? ", in-transition": "",
1068 (state&16)? ", SMART errors": "",
1069 (state&32)? ", Unrecovered Read Errors": "",
1070 (state&64)? ", Missing" : "");
1071 printf(" Avail Size[%d] : %llu K\n", i,
1072 __be64_to_cpu(pd->config_size)>>1);
1073 for (dl = sb->dlist; dl ; dl = dl->next) {
1074 if (dl->disk.refnum == pd->refnum) {
1075 char *dv = map_dev(dl->major, dl->minor, 0);
1076 if (dv)
1077 printf(" Device[%d] : %s\n",
1078 i, dv);
1079 }
1080 }
1081 printf("\n");
1082 }
1083 }
1084
1085 static void examine_super_ddf(struct supertype *st, char *homehost)
1086 {
1087 struct ddf_super *sb = st->sb;
1088
1089 printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
1090 printf(" Version : %.8s\n", sb->anchor.revision);
1091 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1092 printf("\n");
1093 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1094 printf("\n");
1095 printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
1096 printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
1097 ?"yes" : "no");
1098 examine_vds(sb);
1099 examine_pds(sb);
1100 }
1101
1102 static void brief_examine_super_ddf(struct supertype *st)
1103 {
1104 /* We just write a generic DDF ARRAY entry
1105 * The uuid is all hex, 6 groups of 4 bytes
1106 */
1107 struct ddf_super *ddf = st->sb;
1108 int i;
1109 printf("ARRAY /dev/ddf metadata=ddf UUID=");
1110 for (i = 0; i < DDF_GUID_LEN; i++) {
1111 if ((i&3) == 0 && i != 0)
1112 printf(":");
1113 printf("%02X", 255&ddf->anchor.guid[i]);
1114 }
1115 printf("\n");
1116 }
1117
1118 static void detail_super_ddf(struct supertype *st, char *homehost)
1119 {
1120 /* FIXME later
1121 * Could print DDF GUID
1122 * Need to find which array
1123 * If whole, briefly list all arrays
1124 * If one, give name
1125 */
1126 }
1127
1128 static void brief_detail_super_ddf(struct supertype *st)
1129 {
1130 /* FIXME I really need to know which array we are detailing.
1131 * Can that be stored in ddf_super??
1132 */
1133 // struct ddf_super *ddf = st->sb;
1134 }
1135 #endif
1136
1137 static int match_home_ddf(struct supertype *st, char *homehost)
1138 {
1139 /* It matches 'this' host if the controller is a
1140 * Linux-MD controller with vendor_data matching
1141 * the hostname
1142 */
1143 struct ddf_super *ddf = st->sb;
1144 int len = strlen(homehost);
1145
1146 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1147 len < sizeof(ddf->controller.vendor_data) &&
1148 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1149 ddf->controller.vendor_data[len] == 0);
1150 }
1151
1152 static struct vd_config *find_vdcr(struct ddf_super *ddf, int inst)
1153 {
1154 struct vcl *v;
1155
1156 for (v = ddf->conflist; v; v = v->next)
1157 if (inst == v->vcnum)
1158 return &v->conf;
1159 return NULL;
1160 }
1161
1162 static int find_phys(struct ddf_super *ddf, __u32 phys_refnum)
1163 {
1164 /* Find the entry in phys_disk which has the given refnum
1165 * and return its index
1166 */
1167 int i;
1168 for (i=0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1169 if (ddf->phys->entries[i].refnum == phys_refnum)
1170 return i;
1171 return -1;
1172 }
1173
1174 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1175 {
1176 /* The uuid returned here is used for:
1177 * uuid to put into bitmap file (Create, Grow)
1178 * uuid for backup header when saving critical section (Grow)
1179 * comparing uuids when re-adding a device into an array
1180 * For each of these we can make do with a truncated
1181 * or hashed uuid rather than the original, as long as
1182 * everyone agrees.
1183 * In each case the uuid required is that of the data-array,
1184 * not the device-set.
1185 * In the case of SVD we assume the BVD is of interest,
1186 * though that might not be the case if a bitmap were made for
1187 * a mirrored SVD - worry about that later.
1188 * So we need to find the VD configuration record for the
1189 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1190 * The first 16 bytes of the sha1 of these is used.
1191 */
1192 struct ddf_super *ddf = st->sb;
1193 struct vcl *vcl = ddf->currentconf;
1194
1195 if (!vcl)
1196 memset(uuid, 0, sizeof(int)*4);
1197 else {
1198 char buf[20];
1199 struct sha1_ctx ctx;
1200 sha1_init_ctx(&ctx);
1201 sha1_process_bytes(&vcl->conf.guid, DDF_GUID_LEN, &ctx);
1202 if (vcl->conf.sec_elmnt_count > 1)
1203 sha1_process_bytes(&vcl->conf.sec_elmnt_seq, 1, &ctx);
1204 sha1_finish_ctx(&ctx, buf);
1205 memcpy(uuid, buf, sizeof(int)*4);
1206 }
1207 }
1208
1209 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info);
1210
1211 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info)
1212 {
1213 struct ddf_super *ddf = st->sb;
1214
1215 if (ddf->currentconf) {
1216 getinfo_super_ddf_bvd(st, info);
1217 return;
1218 }
1219
1220 info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
1221 info->array.level = LEVEL_CONTAINER;
1222 info->array.layout = 0;
1223 info->array.md_minor = -1;
1224 info->array.ctime = DECADE + __be32_to_cpu(*(__u32*)
1225 (ddf->anchor.guid+16));
1226 info->array.utime = 0;
1227 info->array.chunk_size = 0;
1228
1229
1230 info->disk.major = 0;
1231 info->disk.minor = 0;
1232 if (ddf->dlist) {
1233 info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
1234 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1235
1236 info->data_offset = __be64_to_cpu(ddf->phys->
1237 entries[info->disk.raid_disk].
1238 config_size);
1239 info->component_size = ddf->dlist->size - info->data_offset;
1240 } else {
1241 info->disk.number = -1;
1242 // info->disk.raid_disk = find refnum in the table and use index;
1243 }
1244 info->disk.state = (1 << MD_DISK_SYNC);
1245
1246
1247 info->reshape_active = 0;
1248
1249 strcpy(info->text_version, "ddf");
1250 info->safe_mode_delay = 0;
1251
1252 // uuid_from_super_ddf(info->uuid, sbv);
1253
1254 // info->name[] ?? ;
1255 }
1256
1257 static int rlq_to_layout(int rlq, int prl, int raiddisks);
1258
1259 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info)
1260 {
1261 struct ddf_super *ddf = st->sb;
1262 struct vcl *vc = ddf->currentconf;
1263 int cd = ddf->currentdev;
1264
1265 /* FIXME this returns BVD info - what if we want SVD ?? */
1266
1267 info->array.raid_disks = __be16_to_cpu(vc->conf.prim_elmnt_count);
1268 info->array.level = map_num1(ddf_level_num, vc->conf.prl);
1269 info->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
1270 info->array.raid_disks);
1271 info->array.md_minor = -1;
1272 info->array.ctime = DECADE +
1273 __be32_to_cpu(*(__u32*)(vc->conf.guid+16));
1274 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1275 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1276
1277 if (cd >= 0 && cd < ddf->mppe) {
1278 info->data_offset = __be64_to_cpu(vc->lba_offset[cd]);
1279 if (vc->block_sizes)
1280 info->component_size = vc->block_sizes[cd];
1281 else
1282 info->component_size = __be64_to_cpu(vc->conf.blocks);
1283 }
1284
1285 info->disk.major = 0;
1286 info->disk.minor = 0;
1287 // info->disk.number = __be32_to_cpu(ddf->disk.refnum);
1288 // info->disk.raid_disk = find refnum in the table and use index;
1289 // info->disk.state = ???;
1290
1291 info->container_member = ddf->currentconf->vcnum;
1292
1293 info->resync_start = 0;
1294 if (!(ddf->virt->entries[info->container_member].state
1295 & DDF_state_inconsistent) &&
1296 (ddf->virt->entries[info->container_member].init_state
1297 & DDF_initstate_mask)
1298 == DDF_init_full)
1299 info->resync_start = ~0ULL;
1300
1301 uuid_from_super_ddf(st, info->uuid);
1302
1303 info->container_member = atoi(st->subarray);
1304 sprintf(info->text_version, "/%s/%s",
1305 devnum2devname(st->container_dev),
1306 st->subarray);
1307 info->safe_mode_delay = 200;
1308
1309 // info->name[] ?? ;
1310 }
1311
1312
1313 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
1314 char *update,
1315 char *devname, int verbose,
1316 int uuid_set, char *homehost)
1317 {
1318 /* For 'assemble' and 'force' we need to return non-zero if any
1319 * change was made. For others, the return value is ignored.
1320 * Update options are:
1321 * force-one : This device looks a bit old but needs to be included,
1322 * update age info appropriately.
1323 * assemble: clear any 'faulty' flag to allow this device to
1324 * be assembled.
1325 * force-array: Array is degraded but being forced, mark it clean
1326 * if that will be needed to assemble it.
1327 *
1328 * newdev: not used ????
1329 * grow: Array has gained a new device - this is currently for
1330 * linear only
1331 * resync: mark as dirty so a resync will happen.
1332 * uuid: Change the uuid of the array to match what is given
1333 * homehost: update the recorded homehost
1334 * name: update the name - preserving the homehost
1335 * _reshape_progress: record new reshape_progress position.
1336 *
1337 * Following are not relevant for this version:
1338 * sparc2.2 : update from old dodgy metadata
1339 * super-minor: change the preferred_minor number
1340 * summaries: update redundant counters.
1341 */
1342 int rv = 0;
1343 // struct ddf_super *ddf = st->sb;
1344 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
1345 // struct virtual_entry *ve = find_ve(ddf);
1346
1347 /* we don't need to handle "force-*" or "assemble" as
1348 * there is no need to 'trick' the kernel. When the metadata is
1349 * first updated to activate the array, all the implied modifications
1350 * will just happen.
1351 */
1352
1353 if (strcmp(update, "grow") == 0) {
1354 /* FIXME */
1355 }
1356 if (strcmp(update, "resync") == 0) {
1357 // info->resync_checkpoint = 0;
1358 }
1359 /* We ignore UUID updates as they make even less sense
1360 * with DDF
1361 */
1362 if (strcmp(update, "homehost") == 0) {
1363 /* homehost is stored in controller->vendor_data,
1364 * or it is when we are the vendor
1365 */
1366 // if (info->vendor_is_local)
1367 // strcpy(ddf->controller.vendor_data, homehost);
1368 }
1369 if (strcmp(update, "name") == 0) {
1370 /* name is stored in virtual_entry->name */
1371 // memset(ve->name, ' ', 16);
1372 // strncpy(ve->name, info->name, 16);
1373 }
1374 if (strcmp(update, "_reshape_progress") == 0) {
1375 /* We don't support reshape yet */
1376 }
1377
1378 // update_all_csum(ddf);
1379
1380 return rv;
1381 }
1382
1383 static void make_header_guid(char *guid)
1384 {
1385 __u32 stamp;
1386 int rfd;
1387 /* Create a DDF Header of Virtual Disk GUID */
1388
1389 /* 24 bytes of fiction required.
1390 * first 8 are a 'vendor-id' - "Linux-MD"
1391 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
1392 * Remaining 8 random number plus timestamp
1393 */
1394 memcpy(guid, T10, sizeof(T10));
1395 stamp = __cpu_to_be32(0xdeadbeef);
1396 memcpy(guid+8, &stamp, 4);
1397 stamp = __cpu_to_be32(0);
1398 memcpy(guid+12, &stamp, 4);
1399 stamp = __cpu_to_be32(time(0) - DECADE);
1400 memcpy(guid+16, &stamp, 4);
1401 rfd = open("/dev/urandom", O_RDONLY);
1402 if (rfd < 0 || read(rfd, &stamp, 4) != 4)
1403 stamp = random();
1404 memcpy(guid+20, &stamp, 4);
1405 if (rfd >= 0) close(rfd);
1406 }
1407
1408 static int init_super_ddf_bvd(struct supertype *st,
1409 mdu_array_info_t *info,
1410 unsigned long long size,
1411 char *name, char *homehost,
1412 int *uuid);
1413
1414 static int init_super_ddf(struct supertype *st,
1415 mdu_array_info_t *info,
1416 unsigned long long size, char *name, char *homehost,
1417 int *uuid)
1418 {
1419 /* This is primarily called by Create when creating a new array.
1420 * We will then get add_to_super called for each component, and then
1421 * write_init_super called to write it out to each device.
1422 * For DDF, Create can create on fresh devices or on a pre-existing
1423 * array.
1424 * To create on a pre-existing array a different method will be called.
1425 * This one is just for fresh drives.
1426 *
1427 * We need to create the entire 'ddf' structure which includes:
1428 * DDF headers - these are easy.
1429 * Controller data - a Sector describing this controller .. not that
1430 * this is a controller exactly.
1431 * Physical Disk Record - one entry per device, so
1432 * leave plenty of space.
1433 * Virtual Disk Records - again, just leave plenty of space.
1434 * This just lists VDs, doesn't give details
1435 * Config records - describes the VDs that use this disk
1436 * DiskData - describes 'this' device.
1437 * BadBlockManagement - empty
1438 * Diag Space - empty
1439 * Vendor Logs - Could we put bitmaps here?
1440 *
1441 */
1442 struct ddf_super *ddf;
1443 char hostname[17];
1444 int hostlen;
1445 int max_phys_disks, max_virt_disks;
1446 unsigned long long sector;
1447 int clen;
1448 int i;
1449 int pdsize, vdsize;
1450 struct phys_disk *pd;
1451 struct virtual_disk *vd;
1452
1453 if (!info) {
1454 st->sb = NULL;
1455 return 0;
1456 }
1457 if (st->sb)
1458 return init_super_ddf_bvd(st, info, size, name, homehost,
1459 uuid);
1460
1461 posix_memalign((void**)&ddf, 512, sizeof(*ddf));
1462 memset(ddf, 0, sizeof(*ddf));
1463 ddf->dlist = NULL; /* no physical disks yet */
1464 ddf->conflist = NULL; /* No virtual disks yet */
1465
1466 /* At least 32MB *must* be reserved for the ddf. So let's just
1467 * start 32MB from the end, and put the primary header there.
1468 * Don't do secondary for now.
1469 * We don't know exactly where that will be yet as it could be
1470 * different on each device. So just set up the lengths for now.
1471 *
1472 */
1473
1474 ddf->anchor.magic = DDF_HEADER_MAGIC;
1475 make_header_guid(ddf->anchor.guid);
1476
1477 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
1478 ddf->anchor.seq = __cpu_to_be32(1);
1479 ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
1480 ddf->anchor.openflag = 0xFF;
1481 ddf->anchor.foreignflag = 0;
1482 ddf->anchor.enforcegroups = 0; /* Is this best?? */
1483 ddf->anchor.pad0 = 0xff;
1484 memset(ddf->anchor.pad1, 0xff, 12);
1485 memset(ddf->anchor.header_ext, 0xff, 32);
1486 ddf->anchor.primary_lba = ~(__u64)0;
1487 ddf->anchor.secondary_lba = ~(__u64)0;
1488 ddf->anchor.type = DDF_HEADER_ANCHOR;
1489 memset(ddf->anchor.pad2, 0xff, 3);
1490 ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
1491 ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
1492 of 32M reserved.. */
1493 max_phys_disks = 1023; /* Should be enough */
1494 ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
1495 max_virt_disks = 255;
1496 ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
1497 ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
1498 ddf->max_part = 64;
1499 ddf->mppe = 256;
1500 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
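	/* e.g. with mppe == 256: 1 + ROUND_UP(256*(4+8), 512)/512 == 1 + 6 == 7
	 * sectors per config record - one header sector plus room for a 4-byte
	 * phys_refnum and an 8-byte lba_offset per primary element. */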
1501 ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
1502 ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
1503 memset(ddf->anchor.pad3, 0xff, 54);
1504 /* controller sections is one sector long immediately
1505 * after the ddf header */
1506 sector = 1;
1507 ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
1508 ddf->anchor.controller_section_length = __cpu_to_be32(1);
1509 sector += 1;
1510
1511 /* phys is 8 sectors after that */
1512 pdsize = ROUND_UP(sizeof(struct phys_disk) +
1513 sizeof(struct phys_disk_entry)*max_phys_disks,
1514 512);
1515 switch(pdsize/512) {
1516 case 2: case 8: case 32: case 128: case 512: break;
1517 default: abort();
1518 }
1519 ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
1520 ddf->anchor.phys_section_length =
1521 __cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
1522 sector += pdsize/512;
1523
1524 /* virt is another 32 sectors */
1525 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
1526 sizeof(struct virtual_entry) * max_virt_disks,
1527 512);
1528 switch(vdsize/512) {
1529 case 2: case 8: case 32: case 128: case 512: break;
1530 default: abort();
1531 }
1532 ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
1533 ddf->anchor.virt_section_length =
1534 __cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
1535 sector += vdsize/512;
1536
1537 clen = ddf->conf_rec_len * (ddf->max_part+1);
1538 ddf->anchor.config_section_offset = __cpu_to_be32(sector);
1539 ddf->anchor.config_section_length = __cpu_to_be32(clen);
1540 sector += clen;
1541
1542 ddf->anchor.data_section_offset = __cpu_to_be32(sector);
1543 ddf->anchor.data_section_length = __cpu_to_be32(1);
1544 sector += 1;
1545
1546 ddf->anchor.bbm_section_length = __cpu_to_be32(0);
1547 ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
1548 ddf->anchor.diag_space_length = __cpu_to_be32(0);
1549 ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
1550 ddf->anchor.vendor_length = __cpu_to_be32(0);
1551 ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);
1552
1553 memset(ddf->anchor.pad4, 0xff, 256);
1554
1555 memcpy(&ddf->primary, &ddf->anchor, 512);
1556 memcpy(&ddf->secondary, &ddf->anchor, 512);
1557
1558 ddf->primary.openflag = 1; /* I guess.. */
1559 ddf->primary.type = DDF_HEADER_PRIMARY;
1560
1561 ddf->secondary.openflag = 1; /* I guess.. */
1562 ddf->secondary.type = DDF_HEADER_SECONDARY;
1563
1564 ddf->active = &ddf->primary;
1565
1566 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
1567
1568 /* 24 more bytes of fiction required.
1569 * first 8 are a 'vendor-id' - "Linux-MD"
1570 * Remaining 16 are serial number.... maybe a hostname would do?
1571 */
1572 memcpy(ddf->controller.guid, T10, sizeof(T10));
1573 gethostname(hostname, sizeof(hostname));
1574 hostname[sizeof(hostname) - 1] = 0;
1575 hostlen = strlen(hostname);
1576 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
1577 for (i = strlen(T10) ; i+hostlen < 24; i++)
1578 ddf->controller.guid[i] = ' ';
1579
1580 ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
1581 ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
1582 ddf->controller.type.sub_vendor_id = 0;
1583 ddf->controller.type.sub_device_id = 0;
1584 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
1585 memset(ddf->controller.pad, 0xff, 8);
1586 memset(ddf->controller.vendor_data, 0xff, 448);
1587
1588 posix_memalign((void**)&pd, 512, pdsize);
1589 ddf->phys = pd;
1590 ddf->pdsize = pdsize;
1591
1592 memset(pd, 0xff, pdsize);
1593 memset(pd, 0, sizeof(*pd));
1594 pd->magic = DDF_PHYS_RECORDS_MAGIC;
1595 pd->used_pdes = __cpu_to_be16(0);
1596 pd->max_pdes = __cpu_to_be16(max_phys_disks);
1597 memset(pd->pad, 0xff, 52);
1598
1599 posix_memalign((void**)&vd, 512, vdsize);
1600 ddf->virt = vd;
1601 ddf->vdsize = vdsize;
1602 memset(vd, 0, vdsize);
1603 vd->magic = DDF_VIRT_RECORDS_MAGIC;
1604 vd->populated_vdes = __cpu_to_be16(0);
1605 vd->max_vdes = __cpu_to_be16(max_virt_disks);
1606 memset(vd->pad, 0xff, 52);
1607
1608 for (i=0; i<max_virt_disks; i++)
1609 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
1610
1611 st->sb = ddf;
1612 ddf->updates_pending = 1;
1613 return 1;
1614 }
1615
1616 static int all_ff(char *guid)
1617 {
1618 int i;
1619 for (i = 0; i < DDF_GUID_LEN; i++)
1620 if (guid[i] != (char)0xff)
1621 return 0;
1622 return 1;
1623 }
1624 static int chunk_to_shift(int chunksize)
1625 {
1626 return ffs(chunksize/512)-1;
1627 }
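/* e.g. chunk_to_shift(65536) == 7; getinfo_super_ddf_bvd() reverses this
 * with 512 << 7 == 65536.
 */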
1628
1629 static int level_to_prl(int level)
1630 {
1631 switch (level) {
1632 case LEVEL_LINEAR: return DDF_CONCAT;
1633 case 0: return DDF_RAID0;
1634 case 1: return DDF_RAID1;
1635 case 4: return DDF_RAID4;
1636 case 5: return DDF_RAID5;
1637 case 6: return DDF_RAID6;
1638 default: return -1;
1639 }
1640 }
1641 static int layout_to_rlq(int level, int layout, int raiddisks)
1642 {
1643 switch(level) {
1644 case 0:
1645 return DDF_RAID0_SIMPLE;
1646 case 1:
1647 switch(raiddisks) {
1648 case 2: return DDF_RAID1_SIMPLE;
1649 case 3: return DDF_RAID1_MULTI;
1650 default: return -1;
1651 }
1652 case 4:
1653 switch(layout) {
1654 case 0: return DDF_RAID4_N;
1655 }
1656 break;
1657 case 5:
1658 case 6:
1659 switch(layout) {
1660 case ALGORITHM_LEFT_ASYMMETRIC:
1661 return DDF_RAID5_N_RESTART;
1662 case ALGORITHM_RIGHT_ASYMMETRIC:
1663 if (level == 5)
1664 return DDF_RAID5_0_RESTART;
1665 else
1666 return DDF_RAID6_0_RESTART;
1667 case ALGORITHM_LEFT_SYMMETRIC:
1668 return DDF_RAID5_N_CONTINUE;
1669 case ALGORITHM_RIGHT_SYMMETRIC:
1670 return -1; /* not mentioned in standard */
1671 }
1672 }
1673 return -1;
1674 }
1675
1676 static int rlq_to_layout(int rlq, int prl, int raiddisks)
1677 {
1678 switch(prl) {
1679 case DDF_RAID0:
1680 return 0; /* hopefully rlq == DDF_RAID0_SIMPLE */
1681 case DDF_RAID1:
1682 return 0; /* hopefully rlq == SIMPLE or MULTI depending
1683 on raiddisks*/
1684 case DDF_RAID4:
1685 switch(rlq) {
1686 case DDF_RAID4_N:
1687 return 0;
1688 default:
1689 /* not supported */
1690 return -1; /* FIXME this isn't checked */
1691 }
1692 case DDF_RAID5:
1693 switch(rlq) {
1694 case DDF_RAID5_N_RESTART:
1695 return ALGORITHM_LEFT_ASYMMETRIC;
1696 case DDF_RAID5_0_RESTART:
1697 return ALGORITHM_RIGHT_ASYMMETRIC;
1698 case DDF_RAID5_N_CONTINUE:
1699 return ALGORITHM_LEFT_SYMMETRIC;
1700 default:
1701 return -1;
1702 }
1703 case DDF_RAID6:
1704 switch(rlq) {
1705 case DDF_RAID5_N_RESTART:
1706 return ALGORITHM_LEFT_ASYMMETRIC;
1707 case DDF_RAID6_0_RESTART:
1708 return ALGORITHM_RIGHT_ASYMMETRIC;
1709 case DDF_RAID5_N_CONTINUE:
1710 return ALGORITHM_LEFT_SYMMETRIC;
1711 default:
1712 return -1;
1713 }
1714 }
1715 return -1;
1716 }
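/* Round-trip example: md RAID5 with ALGORITHM_LEFT_SYMMETRIC (the md default
 * layout) maps to DDF_RAID5_N_CONTINUE in layout_to_rlq(), and rlq_to_layout()
 * maps it back again.
 */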
1717
1718 struct extent {
1719 unsigned long long start, size;
1720 };
1721 static int cmp_extent(const void *av, const void *bv)
1722 {
1723 const struct extent *a = av;
1724 const struct extent *b = bv;
1725 if (a->start < b->start)
1726 return -1;
1727 if (a->start > b->start)
1728 return 1;
1729 return 0;
1730 }
1731
1732 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
1733 {
1734 /* find a list of used extents on the given physical device
1735 * (dl) of the given ddf.
1736 * Return a malloced array of 'struct extent'
1737
1738 FIXME ignore DDF_Legacy devices?
1739
1740 */
1741 struct extent *rv;
1742 int n = 0;
1743 int i, j;
1744
1745 rv = malloc(sizeof(struct extent) * (ddf->max_part + 2));
1746 if (!rv)
1747 return NULL;
1748
1749 for (i = 0; i < ddf->max_part; i++) {
1750 struct vcl *v = dl->vlist[i];
1751 if (v == NULL)
1752 continue;
1753 for (j=0; j < __be16_to_cpu(v->conf.prim_elmnt_count); j++)
1754 if (v->conf.phys_refnum[j] == dl->disk.refnum) {
1755 /* This device plays role 'j' in 'v'. */
1756 rv[n].start = __be64_to_cpu(v->lba_offset[j]);
1757 rv[n].size = __be64_to_cpu(v->conf.blocks);
1758 n++;
1759 break;
1760 }
1761 }
1762 qsort(rv, n, sizeof(*rv), cmp_extent);
1763
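	/* terminate with a sentinel entry: start is the device's config_size and
	 * size is 0.  Callers such as add_to_super_ddf_bvd() rely on the zero
	 * size to stop scanning for free space. */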
1764 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
1765 rv[n].size = 0;
1766 return rv;
1767 }
1768
1769 static int init_super_ddf_bvd(struct supertype *st,
1770 mdu_array_info_t *info,
1771 unsigned long long size,
1772 char *name, char *homehost,
1773 int *uuid)
1774 {
1775 /* We are creating a BVD inside a pre-existing container.
1776 * so st->sb is already set.
1777 * We need to create a new vd_config and a new virtual_entry
1778 */
1779 struct ddf_super *ddf = st->sb;
1780 int venum;
1781 struct virtual_entry *ve;
1782 struct vcl *vcl;
1783 struct vd_config *vc;
1784
1785 if (__be16_to_cpu(ddf->virt->populated_vdes)
1786 >= __be16_to_cpu(ddf->virt->max_vdes)) {
1787 fprintf(stderr, Name": This ddf already has the "
1788 "maximum of %d virtual devices\n",
1789 __be16_to_cpu(ddf->virt->max_vdes));
1790 return 0;
1791 }
1792
1793 for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
1794 if (all_ff(ddf->virt->entries[venum].guid))
1795 break;
1796 if (venum == __be16_to_cpu(ddf->virt->max_vdes)) {
1797 fprintf(stderr, Name ": Cannot find spare slot for "
1798 "virtual disk - DDF is corrupt\n");
1799 return 0;
1800 }
1801 ve = &ddf->virt->entries[venum];
1802
1803 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
1804 * timestamp, random number
1805 */
1806 make_header_guid(ve->guid);
1807 ve->unit = __cpu_to_be16(info->md_minor);
1808 ve->pad0 = 0xFFFF;
1809 ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
1810 ve->type = 0;
1811 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
1812 if (info->state & 1) /* clean */
1813 ve->init_state = DDF_init_full;
1814 else
1815 ve->init_state = DDF_init_not;
1816
1817 memset(ve->pad1, 0xff, 14);
1818 memset(ve->name, ' ', 16);
1819 if (name)
1820 strncpy(ve->name, name, 16);
1821 ddf->virt->populated_vdes =
1822 __cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);
1823
1824 /* Now create a new vd_config */
1825 posix_memalign((void**)&vcl, 512,
1826 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512));
1827 vcl->lba_offset = (__u64*) &vcl->conf.phys_refnum[ddf->mppe];
1828 vcl->vcnum = venum;
1829 sprintf(st->subarray, "%d", venum);
1830 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1831
1832 vc = &vcl->conf;
1833
1834 vc->magic = DDF_VD_CONF_MAGIC;
1835 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
1836 vc->timestamp = __cpu_to_be32(time(0)-DECADE);
1837 vc->seqnum = __cpu_to_be32(1);
1838 memset(vc->pad0, 0xff, 24);
1839 vc->prim_elmnt_count = __cpu_to_be16(info->raid_disks);
1840 vc->chunk_shift = chunk_to_shift(info->chunk_size);
1841 vc->prl = level_to_prl(info->level);
1842 vc->rlq = layout_to_rlq(info->level, info->layout, info->raid_disks);
1843 vc->sec_elmnt_count = 1;
1844 vc->sec_elmnt_seq = 0;
1845 vc->srl = 0;
1846 vc->blocks = __cpu_to_be64(info->size * 2);
1847 vc->array_blocks = __cpu_to_be64(
1848 calc_array_size(info->level, info->raid_disks, info->layout,
1849 info->chunk_size, info->size*2));
1850 memset(vc->pad1, 0xff, 8);
1851 vc->spare_refs[0] = 0xffffffff;
1852 vc->spare_refs[1] = 0xffffffff;
1853 vc->spare_refs[2] = 0xffffffff;
1854 vc->spare_refs[3] = 0xffffffff;
1855 vc->spare_refs[4] = 0xffffffff;
1856 vc->spare_refs[5] = 0xffffffff;
1857 vc->spare_refs[6] = 0xffffffff;
1858 vc->spare_refs[7] = 0xffffffff;
1859 memset(vc->cache_pol, 0, 8);
1860 vc->bg_rate = 0x80;
1861 memset(vc->pad2, 0xff, 3);
1862 memset(vc->pad3, 0xff, 52);
1863 memset(vc->pad4, 0xff, 192);
1864 memset(vc->v0, 0xff, 32);
1865 memset(vc->v1, 0xff, 32);
1866 memset(vc->v2, 0xff, 16);
1867 memset(vc->v3, 0xff, 16);
1868 memset(vc->vendor, 0xff, 32);
1869
1870 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	/* the lba_offset array (one __u64 per element) starts right after
	 * the mppe refnums (cf. vcl->lba_offset above) */
	memset(vc->phys_refnum + ddf->mppe, 0x00, 8*ddf->mppe);
1872
1873 vcl->next = ddf->conflist;
1874 ddf->conflist = vcl;
1875 ddf->currentconf = vcl;
1876 ddf->updates_pending = 1;
1877 return 1;
1878 }
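
/* Illustrative sketch only: the unit conversions used when filling in a
 * vd_config above.  mdadm's info->size is in KiB and chunk_size in bytes,
 * while DDF stores sectors and a shift, so blocks = size * 2 and
 * chunk_size = 512 << chunk_shift.  calc_array_size(), called above, does
 * the real capacity calculation; the data-disk counts below are just the
 * usual RAID arithmetic and are our assumption, not a quote of that helper.
 */
static inline unsigned long long ddf_array_blocks_sketch(int level, int raid_disks,
						unsigned long long component_sectors)
{
	int data_disks;

	switch (level) {
	case 0:  data_disks = raid_disks; break;	/* striped */
	case 1:  data_disks = 1; break;			/* mirrored */
	case 4:
	case 5:  data_disks = raid_disks - 1; break;	/* one parity disk */
	case 6:  data_disks = raid_disks - 2; break;	/* two parity disks */
	default: data_disks = raid_disks; break;	/* linear/concat */
	}
	return component_sectors * data_disks;
}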
1879
1880 static void add_to_super_ddf_bvd(struct supertype *st,
1881 mdu_disk_info_t *dk, int fd, char *devname)
1882 {
	/* fd and devname identify a device within the ddf container (st).
1884 * dk identifies a location in the new BVD.
1885 * We need to find suitable free space in that device and update
1886 * the phys_refnum and lba_offset for the newly created vd_config.
1887 * We might also want to update the type in the phys_disk
1888 * section.
1889 */
1890 struct dl *dl;
1891 struct ddf_super *ddf = st->sb;
1892 struct vd_config *vc;
1893 __u64 *lba_offset;
1894 int working;
1895 int i;
1896 unsigned long long blocks, pos, esize;
1897 struct extent *ex;
1898
1899 for (dl = ddf->dlist; dl ; dl = dl->next)
1900 if (dl->major == dk->major &&
1901 dl->minor == dk->minor)
1902 break;
1903 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
1904 return;
1905
1906 vc = &ddf->currentconf->conf;
1907 lba_offset = ddf->currentconf->lba_offset;
1908
1909 ex = get_extents(ddf, dl);
1910 if (!ex)
1911 return;
1912
1913 i = 0; pos = 0;
1914 blocks = __be64_to_cpu(vc->blocks);
1915 if (ddf->currentconf->block_sizes)
1916 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
1917
1918 do {
1919 esize = ex[i].start - pos;
1920 if (esize >= blocks)
1921 break;
1922 pos = ex[i].start + ex[i].size;
1923 i++;
1924 } while (ex[i-1].size);
1925
1926 free(ex);
1927 if (esize < blocks)
1928 return;
1929
1930 ddf->currentdev = dk->raid_disk;
1931 vc->phys_refnum[dk->raid_disk] = dl->disk.refnum;
1932 lba_offset[dk->raid_disk] = __cpu_to_be64(pos);
1933
1934 for (i=0; i < ddf->max_part ; i++)
1935 if (dl->vlist[i] == NULL)
1936 break;
1937 if (i == ddf->max_part)
1938 return;
1939 dl->vlist[i] = ddf->currentconf;
1940
1941 dl->fd = fd;
1942 dl->devname = devname;
1943
1944 /* Check how many working raid_disks, and if we can mark
1945 * array as optimal yet
1946 */
1947 working = 0;
1948
1949 for (i=0; i < __be16_to_cpu(vc->prim_elmnt_count); i++)
1950 if (vc->phys_refnum[i] != 0xffffffff)
1951 working++;
1952
1953 /* Find which virtual_entry */
1954 i = ddf->currentconf->vcnum;
1955 if (working == __be16_to_cpu(vc->prim_elmnt_count))
1956 ddf->virt->entries[i].state =
1957 (ddf->virt->entries[i].state & ~DDF_state_mask)
1958 | DDF_state_optimal;
1959
1960 if (vc->prl == DDF_RAID6 &&
1961 working+1 == __be16_to_cpu(vc->prim_elmnt_count))
1962 ddf->virt->entries[i].state =
1963 (ddf->virt->entries[i].state & ~DDF_state_mask)
1964 | DDF_state_part_optimal;
1965
1966 ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
1967 ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
1968 ddf->updates_pending = 1;
1969 }
1970
1971 /* add a device to a container, either while creating it or while
1972 * expanding a pre-existing container
1973 */
1974 static void add_to_super_ddf(struct supertype *st,
1975 mdu_disk_info_t *dk, int fd, char *devname)
1976 {
1977 struct ddf_super *ddf = st->sb;
1978 struct dl *dd;
1979 time_t now;
1980 struct tm *tm;
1981 unsigned long long size;
1982 struct phys_disk_entry *pde;
1983 int n, i;
1984 struct stat stb;
1985
1986 if (ddf->currentconf) {
1987 add_to_super_ddf_bvd(st, dk, fd, devname);
1988 return;
1989 }
1990
1991 /* This is device numbered dk->number. We need to create
1992 * a phys_disk entry and a more detailed disk_data entry.
1993 */
1994 fstat(fd, &stb);
1995 posix_memalign((void**)&dd, 512,
1996 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part);
1997 dd->major = major(stb.st_rdev);
1998 dd->minor = minor(stb.st_rdev);
1999 dd->devname = devname;
2000 dd->fd = fd;
2001 dd->spare = NULL;
2002
2003 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2004 now = time(0);
2005 tm = localtime(&now);
2006 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2007 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2008 *(__u32*)(dd->disk.guid + 16) = random();
2009 *(__u32*)(dd->disk.guid + 20) = random();
2010
2011 do {
2012 /* Cannot be bothered finding a CRC of some irrelevant details*/
2013 dd->disk.refnum = random();
2014 for (i = __be16_to_cpu(ddf->active->max_pd_entries) - 1;
2015 i >= 0; i--)
2016 if (ddf->phys->entries[i].refnum == dd->disk.refnum)
2017 break;
2018 } while (i >= 0);
2019
2020 dd->disk.forced_ref = 1;
2021 dd->disk.forced_guid = 1;
2022 memset(dd->disk.vendor, ' ', 32);
2023 memcpy(dd->disk.vendor, "Linux", 5);
2024 memset(dd->disk.pad, 0xff, 442);
2025 for (i = 0; i < ddf->max_part ; i++)
2026 dd->vlist[i] = NULL;
2027
2028 n = __be16_to_cpu(ddf->phys->used_pdes);
2029 pde = &ddf->phys->entries[n];
2030 dd->pdnum = n;
2031
2032 if (st->update_tail) {
2033 int len = (sizeof(struct phys_disk) +
2034 sizeof(struct phys_disk_entry));
2035 struct phys_disk *pd;
2036
2037 pd = malloc(len);
2038 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2039 pd->used_pdes = __cpu_to_be16(n);
2040 pde = &pd->entries[0];
2041 dd->mdupdate = pd;
2042 } else {
2043 n++;
2044 ddf->phys->used_pdes = __cpu_to_be16(n);
2045 }
2046
2047 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2048 pde->refnum = dd->disk.refnum;
2049 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2050 pde->state = __cpu_to_be16(DDF_Online);
2051 get_dev_size(fd, NULL, &size);
2052 /* We are required to reserve 32Meg, and record the size in sectors */
2053 pde->config_size = __cpu_to_be64( (size - 32*1024*1024) / 512);
	sprintf(pde->path, "%17.17s", "Information: nil");
2055 memset(pde->pad, 0xff, 6);
2056
2057 dd->size = size >> 9;
2058 if (st->update_tail) {
2059 dd->next = ddf->add_list;
2060 ddf->add_list = dd;
2061 } else {
2062 dd->next = ddf->dlist;
2063 ddf->dlist = dd;
2064 ddf->updates_pending = 1;
2065 }
2066 }
2067
2068 /*
2069 * This is the write_init_super method for a ddf container. It is
2070 * called when creating a container or adding another device to a
2071 * container.
2072 */
2073
2074 #ifndef MDASSEMBLE
2075
2076 static unsigned char null_conf[4096+512];
2077
2078 static int __write_init_super_ddf(struct supertype *st, int do_close)
2079 {
2080
2081 struct ddf_super *ddf = st->sb;
2082 int i;
2083 struct dl *d;
2084 int n_config;
2085 int conf_size;
2086
2087 unsigned long long size, sector;
2088
2089 for (d = ddf->dlist; d; d=d->next) {
2090 int fd = d->fd;
2091
2092 if (fd < 0)
2093 continue;
2094
		/* We need to fill in the primary, (secondary) and workspace
		 * lbas in the headers and set their checksums.
		 * Also checksum phys, virt....
		 *
		 * Then write everything out, and finally write the anchor.
		 */
2101 get_dev_size(fd, NULL, &size);
2102 size /= 512;
2103 ddf->anchor.workspace_lba = __cpu_to_be64(size - 32*1024*2);
2104 ddf->anchor.primary_lba = __cpu_to_be64(size - 16*1024*2);
2105 ddf->anchor.seq = __cpu_to_be32(1);
2106 memcpy(&ddf->primary, &ddf->anchor, 512);
2107 memcpy(&ddf->secondary, &ddf->anchor, 512);
2108
2109 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2110 ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
2111 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2112
2113 ddf->primary.openflag = 0;
2114 ddf->primary.type = DDF_HEADER_PRIMARY;
2115
2116 ddf->secondary.openflag = 0;
2117 ddf->secondary.type = DDF_HEADER_SECONDARY;
2118
2119 ddf->primary.crc = calc_crc(&ddf->primary, 512);
2120 ddf->secondary.crc = calc_crc(&ddf->secondary, 512);
2121
2122 sector = size - 16*1024*2;
2123 lseek64(fd, sector<<9, 0);
2124 write(fd, &ddf->primary, 512);
2125
2126 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2127 write(fd, &ddf->controller, 512);
2128
2129 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2130
2131 write(fd, ddf->phys, ddf->pdsize);
2132
2133 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2134 write(fd, ddf->virt, ddf->vdsize);
2135
2136 /* Now write lots of config records. */
2137 n_config = ddf->max_part;
2138 conf_size = ddf->conf_rec_len * 512;
		for (i = 0 ; i <= n_config ; i++) {
			/* The slot after the last config record holds the
			 * spare assignment record, so don't read past the
			 * end of vlist[] for it. */
			struct vcl *c;
			if (i == n_config)
				c = (struct vcl *)d->spare;
			else
				c = d->vlist[i];
2143
2144 if (c) {
2145 c->conf.crc = calc_crc(&c->conf, conf_size);
2146 write(fd, &c->conf, conf_size);
2147 } else {
				char *null_aligned = (char*)((((unsigned long)null_conf)+511)&~511UL);
				int togo = conf_size;
				if (null_conf[0] != 0xff)
					memset(null_conf, 0xff, sizeof(null_conf));
				while (togo > (int)sizeof(null_conf)-512) {
					write(fd, null_aligned, sizeof(null_conf)-512);
					togo -= sizeof(null_conf)-512;
				}
2156 write(fd, null_aligned, togo);
2157 }
2158 }
2159 d->disk.crc = calc_crc(&d->disk, 512);
2160 write(fd, &d->disk, 512);
2161
2162 /* Maybe do the same for secondary */
2163
2164 lseek64(fd, (size-1)*512, SEEK_SET);
2165 write(fd, &ddf->anchor, 512);
2166 if (do_close) {
2167 close(fd);
2168 d->fd = -1;
2169 }
2170 }
2171 return 1;
2172 }
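
/* A sketch of the on-disk layout produced by __write_init_super_ddf() above,
 * expressed as sector offsets on a device of 'size' sectors.  It is not used
 * by the code; it just restates what the writes above do:
 *
 *   size - 32*1024*2   workspace area (32MiB, recorded in workspace_lba)
 *   size - 16*1024*2   primary header, then controller data, phys disk
 *                      records, virtual disk records, max_part+1 config
 *                      records and the physical disk data block
 *   size - 1           anchor header (written last)
 */
static inline void ddf_layout_sketch(unsigned long long size /* sectors */,
				     unsigned long long *workspace_lba,
				     unsigned long long *primary_lba,
				     unsigned long long *anchor_lba)
{
	*workspace_lba = size - 32*1024*2;
	*primary_lba = size - 16*1024*2;
	*anchor_lba = size - 1;
}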
2173
2174 static int write_init_super_ddf(struct supertype *st)
2175 {
2176
2177 if (st->update_tail) {
2178 /* queue the virtual_disk and vd_config as metadata updates */
2179 struct virtual_disk *vd;
2180 struct vd_config *vc;
2181 struct ddf_super *ddf = st->sb;
2182 int len;
2183
2184 if (!ddf->currentconf) {
2185 int len = (sizeof(struct phys_disk) +
2186 sizeof(struct phys_disk_entry));
2187
2188 /* adding a disk to the container. */
2189 if (!ddf->add_list)
2190 return 0;
2191
2192 append_metadata_update(st, ddf->add_list->mdupdate, len);
2193 ddf->add_list->mdupdate = NULL;
2194 return 0;
2195 }
2196
2197 /* Newly created VD */
2198
2199 /* First the virtual disk. We have a slightly fake header */
2200 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2201 vd = malloc(len);
2202 *vd = *ddf->virt;
2203 vd->entries[0] = ddf->virt->entries[ddf->currentconf->vcnum];
2204 vd->populated_vdes = __cpu_to_be16(ddf->currentconf->vcnum);
2205 append_metadata_update(st, vd, len);
2206
2207 /* Then the vd_config */
2208 len = ddf->conf_rec_len * 512;
2209 vc = malloc(len);
2210 memcpy(vc, &ddf->currentconf->conf, len);
2211 append_metadata_update(st, vc, len);
2212
2213 /* FIXME I need to close the fds! */
2214 return 0;
2215 } else
2216 return __write_init_super_ddf(st, 1);
2217 }
2218
2219 #endif
2220
2221 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize)
2222 {
2223 /* We must reserve the last 32Meg */
2224 if (devsize <= 32*1024*2)
2225 return 0;
2226 return devsize - 32*1024*2;
2227 }
2228
2229 #ifndef MDASSEMBLE
2230 static int
2231 validate_geometry_ddf_container(struct supertype *st,
2232 int level, int layout, int raiddisks,
2233 int chunk, unsigned long long size,
2234 char *dev, unsigned long long *freesize,
2235 int verbose);
2236
2237 static int validate_geometry_ddf_bvd(struct supertype *st,
2238 int level, int layout, int raiddisks,
2239 int chunk, unsigned long long size,
2240 char *dev, unsigned long long *freesize,
2241 int verbose);
2242
2243 static int validate_geometry_ddf(struct supertype *st,
2244 int level, int layout, int raiddisks,
2245 int chunk, unsigned long long size,
2246 char *dev, unsigned long long *freesize,
2247 int verbose)
2248 {
2249 int fd;
2250 struct mdinfo *sra;
2251 int cfd;
2252
2253 /* ddf potentially supports lots of things, but it depends on
2254 * what devices are offered (and maybe kernel version?)
2255 * If given unused devices, we will make a container.
2256 * If given devices in a container, we will make a BVD.
2257 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
2258 */
2259
2260 if (level == LEVEL_CONTAINER) {
2261 /* Must be a fresh device to add to a container */
2262 return validate_geometry_ddf_container(st, level, layout,
2263 raiddisks, chunk,
2264 size, dev, freesize,
2265 verbose);
2266 }
2267
2268 if (st->sb) {
2269 /* A container has already been opened, so we are
2270 * creating in there. Maybe a BVD, maybe an SVD.
2271 * Should make a distinction one day.
2272 */
2273 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
2274 chunk, size, dev, freesize,
2275 verbose);
2276 }
2277 if (!dev) {
2278 /* Initial sanity check. Exclude illegal levels. */
2279 int i;
2280 for (i=0; ddf_level_num[i].num1 != MAXINT; i++)
2281 if (ddf_level_num[i].num2 == level)
2282 break;
2283 if (ddf_level_num[i].num1 == MAXINT)
2284 return 0;
2285 /* Should check layout? etc */
2286 return 1;
2287 }
2288
2289 /* This is the first device for the array.
2290 * If it is a container, we read it in and do automagic allocations,
2291 * no other devices should be given.
2292 * Otherwise it must be a member device of a container, and we
2293 * do manual allocation.
2294 * Later we should check for a BVD and make an SVD.
2295 */
2296 fd = open(dev, O_RDONLY|O_EXCL, 0);
2297 if (fd >= 0) {
2298 sra = sysfs_read(fd, 0, GET_VERSION);
2299 close(fd);
2300 if (sra && sra->array.major_version == -1 &&
2301 strcmp(sra->text_version, "ddf") == 0) {
2302
2303 /* load super */
2304 /* find space for 'n' devices. */
2305 /* remember the devices */
2306 /* Somehow return the fact that we have enough */
2307 }
2308
2309 if (verbose)
2310 fprintf(stderr,
2311 Name ": ddf: Cannot create this array "
2312 "on device %s\n",
2313 dev);
2314 return 0;
2315 }
2316 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
2317 if (verbose)
2318 fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
2319 dev, strerror(errno));
2320 return 0;
2321 }
2322 /* Well, it is in use by someone, maybe a 'ddf' container. */
2323 cfd = open_container(fd);
2324 if (cfd < 0) {
2325 close(fd);
2326 if (verbose)
2327 fprintf(stderr, Name ": ddf: Cannot use %s: %s\n",
2328 dev, strerror(EBUSY));
2329 return 0;
2330 }
2331 sra = sysfs_read(cfd, 0, GET_VERSION);
2332 close(fd);
2333 if (sra && sra->array.major_version == -1 &&
2334 strcmp(sra->text_version, "ddf") == 0) {
2335 /* This is a member of a ddf container. Load the container
2336 * and try to create a bvd
2337 */
2338 struct ddf_super *ddf;
2339 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL, 1) == 0) {
2340 st->sb = ddf;
2341 st->container_dev = fd2devnum(cfd);
2342 close(cfd);
2343 return validate_geometry_ddf_bvd(st, level, layout,
2344 raiddisks, chunk, size,
2345 dev, freesize,
2346 verbose);
2347 }
2348 close(cfd);
	} else {
		/* device may belong to a different container */
		close(cfd);
		return 0;
	}
2351
2352 return 1;
2353 }
2354
2355 static int
2356 validate_geometry_ddf_container(struct supertype *st,
2357 int level, int layout, int raiddisks,
2358 int chunk, unsigned long long size,
2359 char *dev, unsigned long long *freesize,
2360 int verbose)
2361 {
2362 int fd;
2363 unsigned long long ldsize;
2364
2365 if (level != LEVEL_CONTAINER)
2366 return 0;
2367 if (!dev)
2368 return 1;
2369
2370 fd = open(dev, O_RDONLY|O_EXCL, 0);
2371 if (fd < 0) {
2372 if (verbose)
2373 fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
2374 dev, strerror(errno));
2375 return 0;
2376 }
2377 if (!get_dev_size(fd, dev, &ldsize)) {
2378 close(fd);
2379 return 0;
2380 }
2381 close(fd);
2382
2383 *freesize = avail_size_ddf(st, ldsize >> 9);
2384
2385 return 1;
2386 }
2387
2388 static int validate_geometry_ddf_bvd(struct supertype *st,
2389 int level, int layout, int raiddisks,
2390 int chunk, unsigned long long size,
2391 char *dev, unsigned long long *freesize,
2392 int verbose)
2393 {
2394 struct stat stb;
2395 struct ddf_super *ddf = st->sb;
2396 struct dl *dl;
2397 unsigned long long pos = 0;
2398 unsigned long long maxsize;
2399 struct extent *e;
2400 int i;
2401 /* ddf/bvd supports lots of things, but not containers */
2402 if (level == LEVEL_CONTAINER)
2403 return 0;
2404 /* We must have the container info already read in. */
2405 if (!ddf)
2406 return 0;
2407
2408 if (!dev) {
2409 /* General test: make sure there is space for
2410 * 'raiddisks' device extents of size 'size'.
2411 */
2412 unsigned long long minsize = size;
2413 int dcnt = 0;
2414 if (minsize == 0)
2415 minsize = 8;
2416 for (dl = ddf->dlist; dl ; dl = dl->next)
2417 {
2418 int found = 0;
2419 pos = 0;
2420
2421 i = 0;
2422 e = get_extents(ddf, dl);
2423 if (!e) continue;
2424 do {
2425 unsigned long long esize;
2426 esize = e[i].start - pos;
2427 if (esize >= minsize)
2428 found = 1;
2429 pos = e[i].start + e[i].size;
2430 i++;
2431 } while (e[i-1].size);
2432 if (found)
2433 dcnt++;
2434 free(e);
2435 }
2436 if (dcnt < raiddisks) {
2437 if (verbose)
2438 fprintf(stderr,
2439 Name ": ddf: Not enough devices with "
2440 "space for this array (%d < %d)\n",
2441 dcnt, raiddisks);
2442 return 0;
2443 }
2444 return 1;
2445 }
2446 /* This device must be a member of the set */
2447 if (stat(dev, &stb) < 0)
2448 return 0;
2449 if ((S_IFMT & stb.st_mode) != S_IFBLK)
2450 return 0;
2451 for (dl = ddf->dlist ; dl ; dl = dl->next) {
2452 if (dl->major == major(stb.st_rdev) &&
2453 dl->minor == minor(stb.st_rdev))
2454 break;
2455 }
2456 if (!dl) {
2457 if (verbose)
2458 fprintf(stderr, Name ": ddf: %s is not in the "
2459 "same DDF set\n",
2460 dev);
2461 return 0;
2462 }
2463 e = get_extents(ddf, dl);
2464 maxsize = 0;
2465 i = 0;
2466 if (e) do {
2467 unsigned long long esize;
2468 esize = e[i].start - pos;
2469 if (esize >= maxsize)
2470 maxsize = esize;
2471 pos = e[i].start + e[i].size;
2472 i++;
	} while (e[i-1].size);
	free(e);
	*freesize = maxsize;
2475 // FIXME here I am
2476
2477 return 1;
2478 }
2479
2480 static int load_super_ddf_all(struct supertype *st, int fd,
2481 void **sbp, char *devname, int keep_fd)
2482 {
2483 struct mdinfo *sra;
2484 struct ddf_super *super;
2485 struct mdinfo *sd, *best = NULL;
2486 int bestseq = 0;
2487 int seq;
2488 char nm[20];
2489 int dfd;
2490
2491 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
2492 if (!sra)
2493 return 1;
2494 if (sra->array.major_version != -1 ||
2495 sra->array.minor_version != -2 ||
2496 strcmp(sra->text_version, "ddf") != 0)
2497 return 1;
2498
2499 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
2500 return 1;
2501 memset(super, 0, sizeof(*super));
2502
2503 /* first, try each device, and choose the best ddf */
2504 for (sd = sra->devs ; sd ; sd = sd->next) {
2505 int rv;
2506 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2507 dfd = dev_open(nm, O_RDONLY);
2508 if (dfd < 0)
2509 return 2;
2510 rv = load_ddf_headers(dfd, super, NULL);
2511 close(dfd);
2512 if (rv == 0) {
2513 seq = __be32_to_cpu(super->active->seq);
2514 if (super->active->openflag)
2515 seq--;
2516 if (!best || seq > bestseq) {
2517 bestseq = seq;
2518 best = sd;
2519 }
2520 }
2521 }
2522 if (!best)
2523 return 1;
2524 /* OK, load this ddf */
2525 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
2526 dfd = dev_open(nm, O_RDONLY);
2527 if (dfd < 0)
2528 return 1;
2529 load_ddf_headers(dfd, super, NULL);
2530 load_ddf_global(dfd, super, NULL);
2531 close(dfd);
2532 /* Now we need the device-local bits */
2533 for (sd = sra->devs ; sd ; sd = sd->next) {
2534 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2535 dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
2536 if (dfd < 0)
2537 return 2;
2538 load_ddf_headers(dfd, super, NULL);
2539 seq = load_ddf_local(dfd, super, NULL, keep_fd);
2540 if (!keep_fd) close(dfd);
2541 }
2542 if (st->subarray[0]) {
2543 struct vcl *v;
2544
2545 for (v = super->conflist; v; v = v->next)
2546 if (v->vcnum == atoi(st->subarray))
2547 super->currentconf = v;
2548 if (!super->currentconf)
2549 return 1;
2550 }
2551 *sbp = super;
2552 if (st->ss == NULL) {
2553 st->ss = &super_ddf;
2554 st->minor_version = 0;
2555 st->max_devs = 512;
2556 st->container_dev = fd2devnum(fd);
2557 }
2558 return 0;
2559 }
2560 #endif
2561
2562 static struct mdinfo *container_content_ddf(struct supertype *st)
2563 {
2564 /* Given a container loaded by load_super_ddf_all,
2565 * extract information about all the arrays into
2566 * an mdinfo tree.
2567 *
2568 * For each vcl in conflist: create an mdinfo, fill it in,
2569 * then look for matching devices (phys_refnum) in dlist
2570 * and create appropriate device mdinfo.
2571 */
2572 struct ddf_super *ddf = st->sb;
2573 struct mdinfo *rest = NULL;
2574 struct vcl *vc;
2575
2576 for (vc = ddf->conflist ; vc ; vc=vc->next)
2577 {
2578 int i;
2579 struct mdinfo *this;
2580 this = malloc(sizeof(*this));
2581 memset(this, 0, sizeof(*this));
2582 this->next = rest;
2583 rest = this;
2584
2585 this->array.level = map_num1(ddf_level_num, vc->conf.prl);
2586 this->array.raid_disks =
2587 __be16_to_cpu(vc->conf.prim_elmnt_count);
2588 this->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
2589 this->array.raid_disks);
2590 this->array.md_minor = -1;
2591 this->array.ctime = DECADE +
2592 __be32_to_cpu(*(__u32*)(vc->conf.guid+16));
2593 this->array.utime = DECADE +
2594 __be32_to_cpu(vc->conf.timestamp);
2595 this->array.chunk_size = 512 << vc->conf.chunk_shift;
2596
2597 i = vc->vcnum;
2598 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
2599 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
2600 DDF_init_full) {
2601 this->array.state = 0;
2602 this->resync_start = 0;
2603 } else {
2604 this->array.state = 1;
2605 this->resync_start = ~0ULL;
2606 }
		/* DDF virtual disk names are 16 space-padded bytes */
		memcpy(this->name, ddf->virt->entries[i].name, 16);
		this->name[16] = 0;
2609
2610 memset(this->uuid, 0, sizeof(this->uuid));
2611 this->component_size = __be64_to_cpu(vc->conf.blocks);
2612 this->array.size = this->component_size / 2;
2613 this->container_member = i;
2614
2615 sprintf(this->text_version, "/%s/%d",
2616 devnum2devname(st->container_dev),
2617 this->container_member);
2618
2619 for (i=0 ; i < ddf->mppe ; i++) {
2620 struct mdinfo *dev;
2621 struct dl *d;
2622
2623 if (vc->conf.phys_refnum[i] == 0xFFFFFFFF)
2624 continue;
2625
2626 this->array.working_disks++;
2627
2628 for (d = ddf->dlist; d ; d=d->next)
2629 if (d->disk.refnum == vc->conf.phys_refnum[i])
2630 break;
2631 if (d == NULL)
2632 break;
2633
2634 dev = malloc(sizeof(*dev));
2635 memset(dev, 0, sizeof(*dev));
2636 dev->next = this->devs;
2637 this->devs = dev;
2638
2639 dev->disk.number = __be32_to_cpu(d->disk.refnum);
2640 dev->disk.major = d->major;
2641 dev->disk.minor = d->minor;
2642 dev->disk.raid_disk = i;
2643 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2644
2645 dev->events = __be32_to_cpu(ddf->primary.seq);
2646 dev->data_offset = __be64_to_cpu(vc->lba_offset[i]);
2647 dev->component_size = __be64_to_cpu(vc->conf.blocks);
2648 if (d->devname)
2649 strcpy(dev->name, d->devname);
2650 }
2651 }
2652 return rest;
2653 }
2654
2655 static int store_zero_ddf(struct supertype *st, int fd)
2656 {
2657 unsigned long long dsize;
2658 void *buf;
2659
2660 if (!get_dev_size(fd, NULL, &dsize))
2661 return 1;
2662
2663 posix_memalign(&buf, 512, 512);
2664 memset(buf, 0, 512);
2665
2666 lseek64(fd, dsize-512, 0);
2667 write(fd, buf, 512);
2668 free(buf);
2669 return 0;
2670 }
2671
2672 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
2673 {
2674 /*
2675 * return:
2676 * 0 same, or first was empty, and second was copied
2677 * 1 second had wrong number
2678 * 2 wrong uuid
2679 * 3 wrong other info
2680 */
2681 struct ddf_super *first = st->sb;
2682 struct ddf_super *second = tst->sb;
2683
2684 if (!first) {
2685 st->sb = tst->sb;
2686 tst->sb = NULL;
2687 return 0;
2688 }
2689
2690 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
2691 return 2;
2692
2693 /* FIXME should I look at anything else? */
2694 return 0;
2695 }
2696
2697 /*
2698 * A new array 'a' has been started which claims to be instance 'inst'
2699 * within container 'c'.
2700 * We need to confirm that the array matches the metadata in 'c' so
2701 * that we don't corrupt any metadata.
2702 */
2703 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
2704 {
2705 dprintf("ddf: open_new %s\n", inst);
2706 a->info.container_member = atoi(inst);
2707 return 0;
2708 }
2709
2710 /*
2711 * The array 'a' is to be marked clean in the metadata.
2712 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
2713 * clean up to the point (in sectors). If that cannot be recorded in the
2714 * metadata, then leave it as dirty.
2715 *
2716 * For DDF, we need to clear the DDF_state_inconsistent bit in the
2717 * !global! virtual_disk.virtual_entry structure.
2718 */
2719 static int ddf_set_array_state(struct active_array *a, int consistent)
2720 {
2721 struct ddf_super *ddf = a->container->sb;
2722 int inst = a->info.container_member;
2723 int old = ddf->virt->entries[inst].state;
2724 if (consistent == 2) {
2725 /* Should check if a recovery should be started FIXME */
2726 consistent = 1;
2727 if (a->resync_start != ~0ULL)
2728 consistent = 0;
2729 }
2730 if (consistent)
2731 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
2732 else
2733 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
2734 if (old != ddf->virt->entries[inst].state)
2735 ddf->updates_pending = 1;
2736
2737 old = ddf->virt->entries[inst].init_state;
2738 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
2739 if (a->resync_start == ~0ULL)
2740 ddf->virt->entries[inst].init_state |= DDF_init_full;
2741 else if (a->resync_start == 0)
2742 ddf->virt->entries[inst].init_state |= DDF_init_not;
2743 else
2744 ddf->virt->entries[inst].init_state |= DDF_init_quick;
2745 if (old != ddf->virt->entries[inst].init_state)
2746 ddf->updates_pending = 1;
2747
2748 dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
2749 a->resync_start);
2750 return consistent;
2751 }
2752
2753 /*
2754 * The state of each disk is stored in the global phys_disk structure
2755 * in phys_disk.entries[n].state.
2756 * This makes various combinations awkward.
2757 * - When a device fails in any array, it must be failed in all arrays
2758 * that include a part of this device.
2759 * - When a component is rebuilding, we cannot include it officially in the
2760 * array unless this is the only array that uses the device.
2761 *
2762 * So: when transitioning:
2763 * Online -> failed, just set failed flag. monitor will propagate
2764 * spare -> online, the device might need to be added to the array.
2765 * spare -> failed, just set failed. Don't worry if in array or not.
2766 */
2767 static void ddf_set_disk(struct active_array *a, int n, int state)
2768 {
2769 struct ddf_super *ddf = a->container->sb;
2770 int inst = a->info.container_member;
2771 struct vd_config *vc = find_vdcr(ddf, inst);
	int pd;
	int i, st, working;

	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* only look up the physical disk once we know vc was found */
	pd = find_phys(ddf, vc->phys_refnum[n]);
2779 if (pd < 0) {
2780 /* disk doesn't currently exist. If it is now in_sync,
2781 * insert it. */
2782 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
2783 /* Find dev 'n' in a->info->devs, determine the
2784 * ddf refnum, and set vc->phys_refnum and update
2785 * phys->entries[]
2786 */
2787 /* FIXME */
2788 }
2789 } else {
2790 int old = ddf->phys->entries[pd].state;
2791 if (state & DS_FAULTY)
2792 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
2793 if (state & DS_INSYNC) {
2794 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
2795 ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
2796 }
2797 if (old != ddf->phys->entries[pd].state)
2798 ddf->updates_pending = 1;
2799 }
2800
2801 dprintf("ddf: set_disk %d to %x\n", n, state);
2802
2803 /* Now we need to check the state of the array and update
2804 * virtual_disk.entries[n].state.
2805 * It needs to be one of "optimal", "degraded", "failed".
2806 * I don't understand 'deleted' or 'missing'.
2807 */
2808 working = 0;
2809 for (i=0; i < a->info.array.raid_disks; i++) {
2810 pd = find_phys(ddf, vc->phys_refnum[i]);
2811 if (pd < 0)
2812 continue;
2813 st = __be16_to_cpu(ddf->phys->entries[pd].state);
2814 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
2815 == DDF_Online)
2816 working++;
2817 }
2818 state = DDF_state_degraded;
2819 if (working == a->info.array.raid_disks)
2820 state = DDF_state_optimal;
2821 else switch(vc->prl) {
2822 case DDF_RAID0:
2823 case DDF_CONCAT:
2824 case DDF_JBOD:
2825 state = DDF_state_failed;
2826 break;
2827 case DDF_RAID1:
2828 if (working == 0)
2829 state = DDF_state_failed;
2830 break;
2831 case DDF_RAID4:
2832 case DDF_RAID5:
2833 if (working < a->info.array.raid_disks-1)
2834 state = DDF_state_failed;
2835 break;
2836 case DDF_RAID6:
2837 if (working < a->info.array.raid_disks-2)
2838 state = DDF_state_failed;
2839 else if (working == a->info.array.raid_disks-1)
2840 state = DDF_state_part_optimal;
2841 break;
2842 }
2843
2844 if (ddf->virt->entries[inst].state !=
2845 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
2846 | state)) {
2847
2848 ddf->virt->entries[inst].state =
2849 (ddf->virt->entries[inst].state & ~DDF_state_mask)
2850 | state;
2851 ddf->updates_pending = 1;
2852 }
2853
2854 }
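
/* Sketch only, named for illustration: the per-level "minimum working disks"
 * rule applied by the switch in ddf_set_disk() above.  Anything below this
 * count means the virtual disk is failed rather than merely degraded.  A
 * similar check appears in ddf_activate_spare() below using md level numbers.
 */
static inline int ddf_min_working_sketch(int prl, int raid_disks)
{
	switch (prl) {
	case DDF_RAID1:
		return 1;			/* any surviving mirror will do */
	case DDF_RAID4:
	case DDF_RAID5:
		return raid_disks - 1;		/* can lose one disk */
	case DDF_RAID6:
		return raid_disks - 2;		/* can lose two disks */
	case DDF_RAID0:
	case DDF_CONCAT:
	case DDF_JBOD:
	default:
		return raid_disks;		/* no redundancy */
	}
}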
2855
2856 static void ddf_sync_metadata(struct supertype *st)
2857 {
2858
2859 /*
2860 * Write all data to all devices.
2861 * Later, we might be able to track whether only local changes
2862 * have been made, or whether any global data has been changed,
2863 * but ddf is sufficiently weird that it probably always
2864 * changes global data ....
2865 */
2866 struct ddf_super *ddf = st->sb;
2867 if (!ddf->updates_pending)
2868 return;
2869 ddf->updates_pending = 0;
2870 __write_init_super_ddf(st, 0);
2871 dprintf("ddf: sync_metadata\n");
2872 }
2873
2874 static void ddf_process_update(struct supertype *st,
2875 struct metadata_update *update)
2876 {
2877 /* Apply this update to the metadata.
2878 * The first 4 bytes are a DDF_*_MAGIC which guides
2879 * our actions.
2880 * Possible update are:
2881 * DDF_PHYS_RECORDS_MAGIC
2882 * Add a new physical device. Changes to this record
2883 * only happen implicitly.
2884 * used_pdes is the device number.
2885 * DDF_VIRT_RECORDS_MAGIC
2886 * Add a new VD. Possibly also change the 'access' bits.
2887 * populated_vdes is the entry number.
2888 * DDF_VD_CONF_MAGIC
2889 * New or updated VD. the VIRT_RECORD must already
2890 * exist. For an update, phys_refnum and lba_offset
2891 * (at least) are updated, and the VD_CONF must
2892 * be written to precisely those devices listed with
2893 * a phys_refnum.
2894 * DDF_SPARE_ASSIGN_MAGIC
2895 * replacement Spare Assignment Record... but for which device?
2896 *
2897 * So, e.g.:
2898 * - to create a new array, we send a VIRT_RECORD and
2899 * a VD_CONF. Then assemble and start the array.
2900 * - to activate a spare we send a VD_CONF to add the phys_refnum
2901 * and offset. This will also mark the spare as active with
2902 * a spare-assignment record.
2903 */
2904 struct ddf_super *ddf = st->sb;
2905 __u32 *magic = (__u32*)update->buf;
2906 struct phys_disk *pd;
2907 struct virtual_disk *vd;
2908 struct vd_config *vc;
2909 struct vcl *vcl;
2910 struct dl *dl;
2911 int mppe;
2912 int ent;
2913
2914 dprintf("Process update %x\n", *magic);
2915
2916 switch (*magic) {
2917 case DDF_PHYS_RECORDS_MAGIC:
2918
2919 if (update->len != (sizeof(struct phys_disk) +
2920 sizeof(struct phys_disk_entry)))
2921 return;
2922 pd = (struct phys_disk*)update->buf;
2923
2924 ent = __be16_to_cpu(pd->used_pdes);
2925 if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
2926 return;
2927 if (!all_ff(ddf->phys->entries[ent].guid))
2928 return;
2929 ddf->phys->entries[ent] = pd->entries[0];
2930 ddf->phys->used_pdes = __cpu_to_be16(1 +
2931 __be16_to_cpu(ddf->phys->used_pdes));
2932 ddf->updates_pending = 1;
2933 if (ddf->add_list) {
2934 struct active_array *a;
2935 struct dl *al = ddf->add_list;
2936 ddf->add_list = al->next;
2937
2938 al->next = ddf->dlist;
2939 ddf->dlist = al;
2940
2941 /* As a device has been added, we should check
2942 * for any degraded devices that might make
2943 * use of this spare */
2944 for (a = st->arrays ; a; a=a->next)
2945 a->check_degraded = 1;
2946 }
2947 break;
2948
2949 case DDF_VIRT_RECORDS_MAGIC:
2950
2951 if (update->len != (sizeof(struct virtual_disk) +
2952 sizeof(struct virtual_entry)))
2953 return;
2954 vd = (struct virtual_disk*)update->buf;
2955
2956 ent = __be16_to_cpu(vd->populated_vdes);
2957 if (ent >= __be16_to_cpu(ddf->virt->max_vdes))
2958 return;
2959 if (!all_ff(ddf->virt->entries[ent].guid))
2960 return;
2961 ddf->virt->entries[ent] = vd->entries[0];
2962 ddf->virt->populated_vdes = __cpu_to_be16(1 +
2963 __be16_to_cpu(ddf->virt->populated_vdes));
2964 ddf->updates_pending = 1;
2965 break;
2966
2967 case DDF_VD_CONF_MAGIC:
2968 dprintf("len %d %d\n", update->len, ddf->conf_rec_len);
2969
2970 mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
2971 if (update->len != ddf->conf_rec_len * 512)
2972 return;
2973 vc = (struct vd_config*)update->buf;
2974 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
2975 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
2976 break;
2977 dprintf("vcl = %p\n", vcl);
2978 if (vcl) {
2979 /* An update, just copy the phys_refnum and lba_offset
2980 * fields
2981 */
2982 memcpy(vcl->conf.phys_refnum, vc->phys_refnum,
2983 mppe * (sizeof(__u32) + sizeof(__u64)));
2984 } else {
2985 /* A new VD_CONF */
2986 vcl = update->space;
2987 update->space = NULL;
2988 vcl->next = ddf->conflist;
2989 memcpy(&vcl->conf, vc, update->len);
2990 vcl->lba_offset = (__u64*)
2991 &vcl->conf.phys_refnum[mppe];
2992 ddf->conflist = vcl;
2993 }
2994 /* Now make sure vlist is correct for each dl. */
2995 for (dl = ddf->dlist; dl; dl = dl->next) {
2996 int dn;
2997 int vn = 0;
2998 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
2999 for (dn=0; dn < ddf->mppe ; dn++)
3000 if (vcl->conf.phys_refnum[dn] ==
3001 dl->disk.refnum) {
3002 dprintf("dev %d has %p at %d\n",
3003 dl->pdnum, vcl, vn);
3004 dl->vlist[vn++] = vcl;
3005 break;
3006 }
3007 while (vn < ddf->max_part)
3008 dl->vlist[vn++] = NULL;
3009 if (dl->vlist[0]) {
3010 ddf->phys->entries[dl->pdnum].type &=
3011 ~__cpu_to_be16(DDF_Global_Spare);
3012 ddf->phys->entries[dl->pdnum].type |=
3013 __cpu_to_be16(DDF_Active_in_VD);
3014 }
3015 if (dl->spare) {
3016 ddf->phys->entries[dl->pdnum].type &=
3017 ~__cpu_to_be16(DDF_Global_Spare);
3018 ddf->phys->entries[dl->pdnum].type |=
3019 __cpu_to_be16(DDF_Spare);
3020 }
3021 if (!dl->vlist[0] && !dl->spare) {
3022 ddf->phys->entries[dl->pdnum].type |=
3023 __cpu_to_be16(DDF_Global_Spare);
3024 ddf->phys->entries[dl->pdnum].type &=
3025 ~__cpu_to_be16(DDF_Spare |
3026 DDF_Active_in_VD);
3027 }
3028 }
3029 ddf->updates_pending = 1;
3030 break;
3031 case DDF_SPARE_ASSIGN_MAGIC:
3032 default: break;
3033 }
3034 }
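
/* Sketch only (the real sequence lives in write_init_super_ddf() above and
 * the helper name is ours): to create a new virtual disk through mdmon, a
 * VIRT_RECORDS update carrying a single virtual_entry is queued first, then
 * a VD_CONF update of conf_rec_len*512 bytes; ddf_process_update() above
 * applies them in that order, using the field conventions documented in the
 * comment at the top of that function.
 */
static inline void ddf_queue_create_sketch(struct supertype *st,
					   struct ddf_super *ddf,
					   struct vcl *newconf, int venum)
{
	int len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
	struct virtual_disk *vd = malloc(len);
	struct vd_config *vc = malloc(ddf->conf_rec_len * 512);

	*vd = *ddf->virt;			/* copy the header fields */
	vd->entries[0] = ddf->virt->entries[venum];
	vd->populated_vdes = __cpu_to_be16(venum); /* entry number, by convention */
	append_metadata_update(st, vd, len);

	memcpy(vc, &newconf->conf, ddf->conf_rec_len * 512);
	append_metadata_update(st, vc, ddf->conf_rec_len * 512);
}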
3035
3036 static void ddf_prepare_update(struct supertype *st,
3037 struct metadata_update *update)
3038 {
3039 /* This update arrived at managemon.
3040 * We are about to pass it to monitor.
3041 * If a malloc is needed, do it here.
3042 */
3043 struct ddf_super *ddf = st->sb;
3044 __u32 *magic = (__u32*)update->buf;
3045 if (*magic == DDF_VD_CONF_MAGIC)
3046 posix_memalign(&update->space, 512,
3047 offsetof(struct vcl, conf)
3048 + ddf->conf_rec_len * 512);
3049 }
3050
3051 /*
3052 * Check if the array 'a' is degraded but not failed.
3053 * If it is, find as many spares as are available and needed and
3054 * arrange for their inclusion.
3055 * We only choose devices which are not already in the array,
3056 * and prefer those with a spare-assignment to this array.
3057 * otherwise we choose global spares - assuming always that
 * Otherwise we choose global spares - assuming always that
3059 * For each spare that we assign, we return an 'mdinfo' which
3060 * describes the position for the device in the array.
3061 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
3062 * the new phys_refnum and lba_offset values.
3063 *
3064 * Only worry about BVDs at the moment.
3065 */
3066 static struct mdinfo *ddf_activate_spare(struct active_array *a,
3067 struct metadata_update **updates)
3068 {
3069 int working = 0;
3070 struct mdinfo *d;
3071 struct ddf_super *ddf = a->container->sb;
3072 int global_ok = 0;
3073 struct mdinfo *rv = NULL;
3074 struct mdinfo *di;
3075 struct metadata_update *mu;
3076 struct dl *dl;
3077 int i;
3078 struct vd_config *vc;
3079 __u64 *lba;
3080
3081 for (d = a->info.devs ; d ; d = d->next) {
3082 if ((d->curr_state & DS_FAULTY) &&
3083 d->state_fd >= 0)
3084 /* wait for Removal to happen */
3085 return NULL;
3086 if (d->state_fd >= 0)
3087 working ++;
3088 }
3089
3090 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
3091 a->info.array.level);
3092 if (working == a->info.array.raid_disks)
3093 return NULL; /* array not degraded */
3094 switch (a->info.array.level) {
3095 case 1:
3096 if (working == 0)
3097 return NULL; /* failed */
3098 break;
3099 case 4:
3100 case 5:
3101 if (working < a->info.array.raid_disks - 1)
3102 return NULL; /* failed */
3103 break;
3104 case 6:
3105 if (working < a->info.array.raid_disks - 2)
3106 return NULL; /* failed */
3107 break;
3108 default: /* concat or stripe */
3109 return NULL; /* failed */
3110 }
3111
3112 /* For each slot, if it is not working, find a spare */
3113 dl = ddf->dlist;
3114 for (i = 0; i < a->info.array.raid_disks; i++) {
3115 for (d = a->info.devs ; d ; d = d->next)
3116 if (d->disk.raid_disk == i)
3117 break;
3118 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
3119 if (d && (d->state_fd >= 0))
3120 continue;
3121
3122 /* OK, this device needs recovery. Find a spare */
3123 again:
3124 for ( ; dl ; dl = dl->next) {
3125 unsigned long long esize;
3126 unsigned long long pos;
3127 struct mdinfo *d2;
3128 int is_global = 0;
3129 int is_dedicated = 0;
3130 struct extent *ex;
3131 int j;
3132 /* If in this array, skip */
3133 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
3134 if (d2->disk.major == dl->major &&
3135 d2->disk.minor == dl->minor) {
3136 dprintf("%x:%x already in array\n", dl->major, dl->minor);
3137 break;
3138 }
3139 if (d2)
3140 continue;
3141 if (ddf->phys->entries[dl->pdnum].type &
3142 __cpu_to_be16(DDF_Spare)) {
3143 /* Check spare assign record */
3144 if (dl->spare) {
3145 if (dl->spare->type & DDF_spare_dedicated) {
3146 /* check spare_ents for guid */
3147 for (j = 0 ;
3148 j < __be16_to_cpu(dl->spare->populated);
3149 j++) {
3150 if (memcmp(dl->spare->spare_ents[j].guid,
3151 ddf->virt->entries[a->info.container_member].guid,
3152 DDF_GUID_LEN) == 0)
3153 is_dedicated = 1;
3154 }
3155 } else
3156 is_global = 1;
3157 }
3158 } else if (ddf->phys->entries[dl->pdnum].type &
3159 __cpu_to_be16(DDF_Global_Spare)) {
3160 is_global = 1;
3161 }
3162 if ( ! (is_dedicated ||
3163 (is_global && global_ok))) {
3164 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
3165 is_dedicated, is_global);
3166 continue;
3167 }
3168
3169 /* We are allowed to use this device - is there space?
3170 * We need a->info.component_size sectors */
3171 ex = get_extents(ddf, dl);
3172 if (!ex) {
3173 dprintf("cannot get extents\n");
3174 continue;
3175 }
3176 j = 0; pos = 0;
3177 esize = 0;
3178
3179 do {
3180 esize = ex[j].start - pos;
3181 if (esize >= a->info.component_size)
3182 break;
				pos = ex[j].start + ex[j].size;
				j++;
			} while (ex[j-1].size);
3186
3187 free(ex);
3188 if (esize < a->info.component_size) {
3189 dprintf("%x:%x has no room: %llu %llu\n", dl->major, dl->minor,
3190 esize, a->info.component_size);
3191 /* No room */
3192 continue;
3193 }
3194
3195 /* Cool, we have a device with some space at pos */
3196 di = malloc(sizeof(*di));
3197 memset(di, 0, sizeof(*di));
3198 di->disk.number = i;
3199 di->disk.raid_disk = i;
3200 di->disk.major = dl->major;
3201 di->disk.minor = dl->minor;
3202 di->disk.state = 0;
3203 di->data_offset = pos;
3204 di->component_size = a->info.component_size;
3205 di->container_member = dl->pdnum;
3206 di->next = rv;
3207 rv = di;
3208 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
3209 i, pos);
3210
3211 break;
3212 }
3213 if (!dl && ! global_ok) {
3214 /* not enough dedicated spares, try global */
3215 global_ok = 1;
3216 dl = ddf->dlist;
3217 goto again;
3218 }
3219 }
3220
3221 if (!rv)
3222 /* No spares found */
3223 return rv;
3224 /* Now 'rv' has a list of devices to return.
3225 * Create a metadata_update record to update the
3226 * phys_refnum and lba_offset values
3227 */
3228 mu = malloc(sizeof(*mu));
3229 mu->buf = malloc(ddf->conf_rec_len * 512);
3230 posix_memalign(&mu->space, 512, sizeof(struct vcl));
	mu->len = ddf->conf_rec_len * 512;
3232 mu->next = *updates;
3233 vc = find_vdcr(ddf, a->info.container_member);
3234 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
3235
3236 vc = (struct vd_config*)mu->buf;
3237 lba = (__u64*)&vc->phys_refnum[ddf->mppe];
	for (di = rv ; di ; di = di->next) {
		/* di->container_member holds the pdnum chosen above; 'dl'
		 * may well point at a different device by now. */
		vc->phys_refnum[di->disk.raid_disk] =
			ddf->phys->entries[di->container_member].refnum;
3241 lba[di->disk.raid_disk] = di->data_offset;
3242 }
3243 *updates = mu;
3244 return rv;
3245 }
3246
3247 struct superswitch super_ddf = {
3248 #ifndef MDASSEMBLE
3249 .examine_super = examine_super_ddf,
3250 .brief_examine_super = brief_examine_super_ddf,
3251 .detail_super = detail_super_ddf,
3252 .brief_detail_super = brief_detail_super_ddf,
3253 .validate_geometry = validate_geometry_ddf,
3254 .write_init_super = write_init_super_ddf,
3255 #endif
3256 .match_home = match_home_ddf,
3257 .uuid_from_super= uuid_from_super_ddf,
3258 .getinfo_super = getinfo_super_ddf,
3259 .update_super = update_super_ddf,
3260
3261 .avail_size = avail_size_ddf,
3262
3263 .compare_super = compare_super_ddf,
3264
3265 .load_super = load_super_ddf,
3266 .init_super = init_super_ddf,
3267 .store_super = store_zero_ddf,
3268 .free_super = free_super_ddf,
3269 .match_metadata_desc = match_metadata_desc_ddf,
3270 .add_to_super = add_to_super_ddf,
3271 .container_content = container_content_ddf,
3272
3273 .external = 1,
3274
3275 /* for mdmon */
3276 .open_new = ddf_open_new,
3277 .set_array_state= ddf_set_array_state,
3278 .set_disk = ddf_set_disk,
3279 .sync_metadata = ddf_sync_metadata,
3280 .process_update = ddf_process_update,
3281 .prepare_update = ddf_prepare_update,
3282 .activate_spare = ddf_activate_spare,
3283
3284 };