]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
56aad684db8c8ddc491fb1e080c87b2cd31c7d1f
[thirdparty/mdadm.git] / super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF takes from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
60
61 /* Primary Raid Level (PRL) */
62 #define DDF_RAID0 0x00
63 #define DDF_RAID1 0x01
64 #define DDF_RAID3 0x03
65 #define DDF_RAID4 0x04
66 #define DDF_RAID5 0x05
67 #define DDF_RAID1E 0x11
68 #define DDF_JBOD 0x0f
69 #define DDF_CONCAT 0x1f
70 #define DDF_RAID5E 0x15
71 #define DDF_RAID5EE 0x25
72 #define DDF_RAID6 0x06
73
74 /* Raid Level Qualifier (RLQ) */
75 #define DDF_RAID0_SIMPLE 0x00
76 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
77 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
78 #define DDF_RAID3_0 0x00 /* parity in first extent */
79 #define DDF_RAID3_N 0x01 /* parity in last extent */
80 #define DDF_RAID4_0 0x00 /* parity in first extent */
81 #define DDF_RAID4_N 0x01 /* parity in last extent */
82 /* these apply to raid5e and raid5ee as well */
83 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
84 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
85 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
86 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
87
88 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
89 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
90
91 /* Secondary RAID Level (SRL) */
92 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
93 #define DDF_2MIRRORED 0x01
94 #define DDF_2CONCAT 0x02
95 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
96
97 /* Magic numbers */
98 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
99 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
100 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
101 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
102 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
103 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
104 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
105 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
106 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
107 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
108
109 #define DDF_GUID_LEN 24
110 #define DDF_REVISION_0 "01.00.00"
111 #define DDF_REVISION_2 "01.02.00"
112
/* The DDF header.  One copy lives in the last block of the device (the
 * anchor); it records the LBAs of the primary and secondary copies.
 * All multi-byte fields are big-endian (see file-top note).
 * Layout is fixed by the SNIA DDF spec - do not reorder or resize fields.
 */
struct ddf_header {
	__u32	magic;		/* DDF_HEADER_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	__u32	seq;		/* starts at '1' */
	__u32	timestamp;
	__u8	openflag;
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	__u64	primary_lba;
	__u64	secondary_lba;
	__u8	type;		/* DDF_HEADER_ANCHOR/PRIMARY/SECONDARY */
	__u8	pad2[3];	/* 0xff */
	__u32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	__u64	workspace_lba;
	__u16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	__u16	max_vd_entries;	/* 2^(4,6,8,10,12)-1 : i.e. as above */
	__u16	max_partitions;	/* i.e. max num of configuration
				   record entries per disk */
	__u16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	__u16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* Section offsets below are in sectors, relative to the
	 * primary_lba/secondary_lba of the header copy being read;
	 * lengths are in sectors too. */
	__u32	controller_section_offset;
	__u32	controller_section_length;
	__u32	phys_section_offset;
	__u32	phys_section_length;
	__u32	virt_section_offset;
	__u32	virt_section_length;
	__u32	config_section_offset;
	__u32	config_section_length;
	__u32	data_section_offset;
	__u32	data_section_length;
	__u32	bbm_section_offset;
	__u32	bbm_section_length;
	__u32	diag_space_offset;
	__u32	diag_space_length;
	__u32	vendor_offset;
	__u32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};
162
163 /* type field */
164 #define DDF_HEADER_ANCHOR 0x00
165 #define DDF_HEADER_PRIMARY 0x01
166 #define DDF_HEADER_SECONDARY 0x02
167
168 /* The content of the 'controller section' - global scope */
/* The content of the 'controller section' - global scope.
 * Identifies the controller (by PCI-style IDs) that last wrote the
 * metadata.  512 bytes on disk. */
struct ddf_controller_data {
	__u32	magic;			/* DDF_CONTROLLER_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {
		__u16 vendor_id;
		__u16 device_id;
		__u16 sub_vendor_id;
		__u16 sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];		/* 0xff */
	__u8	vendor_data[448];
};
183
184 /* The content of phys_section - global scope */
/* The content of phys_section - global scope.
 * A table of all physical disks in the DDF group; trailing
 * variable-length array of entries, one per disk. */
struct phys_disk {
	__u32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	__u32	crc;
	__u16	used_pdes;	/* number of entries in use */
	__u16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		__u32	refnum;		/* matches disk_data.refnum */
		__u16	type;		/* DDF_Active_in_VD etc. - bitmap */
		__u16	state;		/* DDF_Online etc. - bitmap */
		__u64	config_size;	/* DDF structures must be after here */
		char	path[18];	/* another horrible structure really */
		__u8	pad[6];
	} entries[0];
};
201
202 /* phys_disk_entry.type is a bitmap - bigendian remember */
203 #define DDF_Forced_PD_GUID 1
204 #define DDF_Active_in_VD 2
205 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
206 #define DDF_Spare 8 /* overrides Global_spare */
207 #define DDF_Foreign 16
208 #define DDF_Legacy 32 /* no DDF on this device */
209
210 #define DDF_Interface_mask 0xf00
211 #define DDF_Interface_SCSI 0x100
212 #define DDF_Interface_SAS 0x200
213 #define DDF_Interface_SATA 0x300
214 #define DDF_Interface_FC 0x400
215
216 /* phys_disk_entry.state is a bigendian bitmap */
217 #define DDF_Online 1
218 #define DDF_Failed 2 /* overrides 1,4,8 */
219 #define DDF_Rebuilding 4
220 #define DDF_Transition 8
221 #define DDF_SMART 16
222 #define DDF_ReadErrors 32
223 #define DDF_Missing 64
224
225 /* The content of the virt_section global scope */
/* The content of the virt_section - global scope.
 * A table of all virtual disks (arrays) in the DDF group; trailing
 * variable-length array of entries. */
struct virtual_disk {
	__u32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	__u32	crc;
	__u16	populated_vdes;	/* number of entries in use */
	__u16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		__u16	unit;
		__u16	pad0;		/* 0xffff */
		__u16	guid_crc;
		__u16	type;		/* DDF_Shared etc. - bitmap */
		__u8	state;		/* DDF_state_* value plus flag bits */
		__u8	init_state;	/* DDF_initstate_* / DDF_access_* */
		__u8	pad1[14];
		char	name[16];
	} entries[0];
};
244
245 /* virtual_entry.type is a bitmap - bigendian */
246 #define DDF_Shared 1
247 #define DDF_Enforce_Groups 2
248 #define DDF_Unicode 4
249 #define DDF_Owner_Valid 8
250
251 /* virtual_entry.state is a bigendian bitmap */
252 #define DDF_state_mask 0x7
253 #define DDF_state_optimal 0x0
254 #define DDF_state_degraded 0x1
255 #define DDF_state_deleted 0x2
256 #define DDF_state_missing 0x3
257 #define DDF_state_failed 0x4
258 #define DDF_state_part_optimal 0x5
259
260 #define DDF_state_morphing 0x8
261 #define DDF_state_inconsistent 0x10
262
263 /* virtual_entry.init_state is a bigendian bitmap */
264 #define DDF_initstate_mask 0x03
265 #define DDF_init_not 0x00
266 #define DDF_init_quick 0x01 /* initialisation is progress.
267 * i.e. 'state_inconsistent' */
268 #define DDF_init_full 0x02
269
270 #define DDF_access_mask 0xc0
271 #define DDF_access_rw 0x00
272 #define DDF_access_ro 0x80
273 #define DDF_access_blocked 0xc0
274
275 /* The content of the config_section - local scope
276 * It has multiple records each config_record_len sectors
277 * They can be vd_config or spare_assign
278 */
279
/* One virtual-disk configuration record from the config section.
 * Describes one BVD (basic virtual disk): RAID level, chunk size and the
 * member disks.  The record is config_record_len sectors on disk; the
 * phys_refnum[] flexible array (and the lba table that follows it) fill
 * the remainder. */
struct vd_config {
	__u32	magic;		/* DDF_VD_CONF_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	__u32	timestamp;
	__u32	seqnum;		/* bumped on each update; highest wins */
	__u8	pad0[24];
	__u16	prim_elmnt_count;	/* devices in this BVD */
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* primary RAID level - DDF_RAID* */
	__u8	rlq;		/* RAID level qualifier - layout variant */
	__u8	sec_elmnt_count; /* BVDs in the secondary-level array */
	__u8	sec_elmnt_seq;	/* which BVD this record describes */
	__u8	srl;		/* secondary RAID level - DDF_2* */
	__u64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	__u64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	__u32	spare_refs[8];
	__u8	cache_pol[8];	/* byte 7 is the DDF_cache_* bitmap */
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	__u32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
314
315 /* vd_config.cache_pol[7] is a bitmap */
316 #define DDF_cache_writeback 1 /* else writethrough */
317 #define DDF_cache_wadaptive 2 /* only applies if writeback */
318 #define DDF_cache_readahead 4
319 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
320 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
321 #define DDF_cache_wallowed 32 /* enable write caching */
322 #define DDF_cache_rallowed 64 /* enable read caching */
323
/* A spare-assignment record from the config section: lists the virtual
 * disks this (spare) physical disk is dedicated to. */
struct spare_assign {
	__u32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	__u32	crc;
	__u32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* DDF_spare_* bitmap */
	__u16	populated;	/* SAEs used */
	__u16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];	/* VD this spare serves */
		__u16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];
};
339 /* spare_assign.type is a bitmap */
340 #define DDF_spare_dedicated 0x1 /* else global */
341 #define DDF_spare_revertible 0x2 /* else committable */
342 #define DDF_spare_active 0x4 /* else not active */
343 #define DDF_spare_affinity 0x8 /* enclosure affinity */
344
345 /* The data_section contents - local scope */
/* The data_section contents - local scope.
 * Identifies the physical disk the metadata was read from; guid/refnum
 * match an entry in the phys_disk table. */
struct disk_data {
	__u32	magic;		/* DDF_PHYS_DATA_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	__u32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
356
357 /* bbm_section content */
/* bbm_section content - bad block management log.
 * Maps defective block ranges to replacement locations in spare space. */
struct bad_block_log {
	__u32	magic;		/* DDF_BBM_LOG_MAGIC */
	__u32	crc;
	__u16	entry_count;
	__u32	spare_count;
	__u8	pad[10];
	__u64	first_spare;
	struct mapped_block {
		__u64	defective_start;
		__u32	replacement_start;
		__u16	remap_count;
		__u8	pad[2];
	} entries[0];
};
372
373 /* Struct for internally holding ddf structures */
374 /* The DDF structure stored on each device is potentially
375 * quite different, as some data is global and some is local.
376 * The global data is:
377 * - ddf header
378 * - controller_data
379 * - Physical disk records
380 * - Virtual disk records
381 * The local data is:
382 * - Configuration records
383 * - Physical Disk data section
384 * ( and Bad block and vendor which I don't care about yet).
385 *
386 * The local data is parsed into separate lists as it is read
387 * and reconstructed for writing. This means that we only need
388 * to make config changes once and they are automatically
389 * propagated to all devices.
390 * Note that the ddf_super has space of the conf and disk data
391 * for this disk and also for a list of all such data.
392 * The list is only used for the superblock that is being
393 * built in Create or Assemble to describe the whole array.
394 */
/* In-memory representation of all DDF metadata for one container.
 * Global sections (headers, controller, phys, virt) are stored directly;
 * per-disk local data is parsed into the 'conflist' (one vcl per virtual
 * disk) and 'dlist' (one dl per physical disk) linked lists. */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* &primary or &secondary, whichever
					 * was chosen as current */
	struct phys_disk	*phys;
	struct virtual_disk	*virt;
	int pdsize, vdsize;		/* byte sizes of *phys / *virt */
	unsigned int max_part, mppe, conf_rec_len;	/* cached from the
							 * active header */
	int currentdev;
	int updates_pending;
	/* One vcl per virtual disk; the union pads the bookkeeping
	 * fields to a full sector so 'conf' stays 512-byte aligned. */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				__u64 *lba_offset; /* location in 'conf' of
						    * the lba table */
				unsigned int	vcnum; /* index into ->virt */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* One dl per physical disk; same sector-padding trick, and a
	 * trailing array of pointers into conflist. */
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char		*devname;
				int fd;
				unsigned long long size; /* sectors */
				unsigned long long primary_lba; /* sectors */
				unsigned long long secondary_lba; /* sectors */
				unsigned long long workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
444
445 #ifndef offsetof
446 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
447 #endif
448
#if DEBUG
static int all_ff(char *guid);
/* Debug helper: print the state/init_state of every defined virtual
 * disk entry (entries whose GUID is all 0xff are unused and skipped). */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
/* Non-debug builds: state tracing compiles to nothing. */
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
467
468 #define ddf_set_updates_pending(x) \
469 do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
470
471 static unsigned int calc_crc(void *buf, int len)
472 {
473 /* crcs are always at the same place as in the ddf_header */
474 struct ddf_header *ddf = buf;
475 __u32 oldcrc = ddf->crc;
476 __u32 newcrc;
477 ddf->crc = 0xffffffff;
478
479 newcrc = crc32(0, buf, len);
480 ddf->crc = oldcrc;
481 /* The crc is store (like everything) bigendian, so convert
482 * here for simplicity
483 */
484 return __cpu_to_be32(newcrc);
485 }
486
/* Read one DDF header copy (primary or secondary) from fd at sector
 * 'lba' and validate it against the already-loaded anchor.
 * 'size' is the device size in sectors; 'type' is the expected
 * DDF_HEADER_PRIMARY/SECONDARY value.
 * Returns 1 if the header is valid and consistent, 0 otherwise.
 */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *  everything after header_type must be the same
	 */
	/* reject an LBA at or beyond the end of the device */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (hdr->magic != DDF_HEADER_MAGIC)
		return 0;
	if (calc_crc(hdr, 512) != hdr->crc)
		return 0;
	/* identity fields and everything from pad2 onwards must match
	 * the anchor byte-for-byte */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    anchor->primary_lba != hdr->primary_lba ||
	    anchor->secondary_lba != hdr->secondary_lba ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0)
		return 0;

	/* Looks good enough to me... */
	return 1;
}
523
/* Read one metadata section from fd into 'buf'.
 * offset_be/len_be are the big-endian sector offset/length taken from
 * the active header; the offset is relative to the active header copy's
 * base LBA.  If 'buf' is NULL a 512-byte-aligned buffer is allocated
 * (freed again on error); a pre-supplied 'buf' must be exactly one
 * sector.  If 'check' is set, only the section lengths the DDF spec
 * allows are accepted.
 * Returns the buffer on success, NULL on failure.
 */
static void *load_section(int fd, struct ddf_super *super, void *buf,
			  __u32 offset_be, __u32 len_be, int check)
{
	unsigned long long offset = __be32_to_cpu(offset_be);
	unsigned long long len = __be32_to_cpu(len_be);
	int dofree = (buf == NULL);

	if (check)
		if (len != 2 && len != 8 && len != 32
		    && len != 128 && len != 512)
			return NULL;

	/* sanity cap: no section should exceed 1024 sectors */
	if (len > 1024)
		return NULL;
	if (buf) {
		/* All pre-allocated sections are a single block */
		if (len != 1)
			return NULL;
	} else if (posix_memalign(&buf, 512, len<<9) != 0)
		buf = NULL;

	if (!buf)
		return NULL;

	/* section offsets are relative to the active header's base LBA */
	if (super->active->type == 1)
		offset += __be64_to_cpu(super->active->primary_lba);
	else
		offset += __be64_to_cpu(super->active->secondary_lba);

	if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	return buf;
}
565
/* Read and validate the anchor, primary and secondary DDF headers from
 * fd, and pick which copy becomes super->active.
 * Returns 0 on success, 1 for I/O errors reading the anchor, 2 when the
 * metadata itself is invalid or unsupported.
 */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	/* the anchor lives in the very last sector of the device */
	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (super->anchor.magic != DDF_HEADER_MAGIC) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	/* the primary header must load; the secondary is optional */
	if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
		return 2;
	}
	super->active = &super->primary;
	if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		/* Prefer the secondary copy when it is strictly newer and
		 * cleanly closed, or equally new but cleanly closed while
		 * the primary was left open. */
		if ((__be32_to_cpu(super->primary.seq)
		     < __be32_to_cpu(super->secondary.seq) &&
		     !super->secondary.openflag)
		    || (__be32_to_cpu(super->primary.seq)
			== __be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
			)
			super->active = &super->secondary;
	}
	return 0;
}
627
/* Load the globally-scoped sections (controller, physical disk records,
 * virtual disk records) from fd, using the offsets in the active header,
 * and cache the per-container limits from the header.
 * Returns 0 on success, 2 when any section fails to load (partial
 * allocations are released).
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	/* controller data fits in the pre-allocated, single-sector slot */
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		/* free(NULL) is safe, so no need to distinguish which
		 * load failed */
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	/* cache frequently-used limits in host byte order */
	super->max_part = __be16_to_cpu(super->active->max_partitions);
	super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
	return 0;
}
663
664 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
665 unsigned int len)
666 {
667 int i;
668 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
669 if (vcl->other_bvds[i] != NULL &&
670 vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
671 break;
672
673 if (i < vcl->conf.sec_elmnt_count-1) {
674 if (vd->seqnum <= vcl->other_bvds[i]->seqnum)
675 return;
676 } else {
677 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
678 if (vcl->other_bvds[i] == NULL)
679 break;
680 if (i == vcl->conf.sec_elmnt_count-1) {
681 pr_err("no space for sec level config %u, count is %u\n",
682 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
683 return;
684 }
685 if (posix_memalign((void **)&vcl->other_bvds[i], 512, len)
686 != 0) {
687 pr_err("%s could not allocate vd buf\n", __func__);
688 return;
689 }
690 }
691 memcpy(vcl->other_bvds[i], vd, len);
692 }
693
694 static int load_ddf_local(int fd, struct ddf_super *super,
695 char *devname, int keep)
696 {
697 struct dl *dl;
698 struct stat stb;
699 char *conf;
700 unsigned int i;
701 unsigned int confsec;
702 int vnum;
703 unsigned int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
704 unsigned long long dsize;
705
706 /* First the local disk info */
707 if (posix_memalign((void**)&dl, 512,
708 sizeof(*dl) +
709 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
710 pr_err("%s could not allocate disk info buffer\n",
711 __func__);
712 return 1;
713 }
714
715 load_section(fd, super, &dl->disk,
716 super->active->data_section_offset,
717 super->active->data_section_length,
718 0);
719 dl->devname = devname ? xstrdup(devname) : NULL;
720
721 fstat(fd, &stb);
722 dl->major = major(stb.st_rdev);
723 dl->minor = minor(stb.st_rdev);
724 dl->next = super->dlist;
725 dl->fd = keep ? fd : -1;
726
727 dl->size = 0;
728 if (get_dev_size(fd, devname, &dsize))
729 dl->size = dsize >> 9;
730 /* If the disks have different sizes, the LBAs will differ
731 * between phys disks.
732 * At this point here, the values in super->active must be valid
733 * for this phys disk. */
734 dl->primary_lba = super->active->primary_lba;
735 dl->secondary_lba = super->active->secondary_lba;
736 dl->workspace_lba = super->active->workspace_lba;
737 dl->spare = NULL;
738 for (i = 0 ; i < super->max_part ; i++)
739 dl->vlist[i] = NULL;
740 super->dlist = dl;
741 dl->pdnum = -1;
742 for (i = 0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
743 if (memcmp(super->phys->entries[i].guid,
744 dl->disk.guid, DDF_GUID_LEN) == 0)
745 dl->pdnum = i;
746
747 /* Now the config list. */
748 /* 'conf' is an array of config entries, some of which are
749 * probably invalid. Those which are good need to be copied into
750 * the conflist
751 */
752
753 conf = load_section(fd, super, NULL,
754 super->active->config_section_offset,
755 super->active->config_section_length,
756 0);
757
758 vnum = 0;
759 for (confsec = 0;
760 confsec < __be32_to_cpu(super->active->config_section_length);
761 confsec += super->conf_rec_len) {
762 struct vd_config *vd =
763 (struct vd_config *)((char*)conf + confsec*512);
764 struct vcl *vcl;
765
766 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
767 if (dl->spare)
768 continue;
769 if (posix_memalign((void**)&dl->spare, 512,
770 super->conf_rec_len*512) != 0) {
771 pr_err("%s could not allocate spare info buf\n",
772 __func__);
773 return 1;
774 }
775
776 memcpy(dl->spare, vd, super->conf_rec_len*512);
777 continue;
778 }
779 if (vd->magic != DDF_VD_CONF_MAGIC)
780 continue;
781 for (vcl = super->conflist; vcl; vcl = vcl->next) {
782 if (memcmp(vcl->conf.guid,
783 vd->guid, DDF_GUID_LEN) == 0)
784 break;
785 }
786
787 if (vcl) {
788 dl->vlist[vnum++] = vcl;
789 if (vcl->other_bvds != NULL &&
790 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
791 add_other_bvd(vcl, vd, super->conf_rec_len*512);
792 continue;
793 }
794 if (__be32_to_cpu(vd->seqnum) <=
795 __be32_to_cpu(vcl->conf.seqnum))
796 continue;
797 } else {
798 if (posix_memalign((void**)&vcl, 512,
799 (super->conf_rec_len*512 +
800 offsetof(struct vcl, conf))) != 0) {
801 pr_err("%s could not allocate vcl buf\n",
802 __func__);
803 return 1;
804 }
805 vcl->next = super->conflist;
806 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
807 if (vd->sec_elmnt_count > 1)
808 vcl->other_bvds =
809 xcalloc(vd->sec_elmnt_count - 1,
810 sizeof(struct vd_config *));
811 else
812 vcl->other_bvds = NULL;
813 super->conflist = vcl;
814 dl->vlist[vnum++] = vcl;
815 }
816 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
817 vcl->lba_offset = (__u64*)
818 &vcl->conf.phys_refnum[super->mppe];
819
820 for (i=0; i < max_virt_disks ; i++)
821 if (memcmp(super->virt->entries[i].guid,
822 vcl->conf.guid, DDF_GUID_LEN)==0)
823 break;
824 if (i < max_virt_disks)
825 vcl->vcnum = i;
826 }
827 free(conf);
828
829 return 0;
830 }
831
832 #ifndef MDASSEMBLE
833 static int load_super_ddf_all(struct supertype *st, int fd,
834 void **sbp, char *devname);
835 #endif
836
837 static void free_super_ddf(struct supertype *st);
838
/* Top-level entry point: load all DDF metadata from the device open on
 * fd into a freshly allocated ddf_super attached to st->sb.
 * Rejects partitions (unless hw-compat checks are disabled), devices
 * smaller than 32MB and devices whose size is not a multiple of 512.
 * Returns 0 on success, non-zero (1 or 2, propagated from the loaders)
 * on failure.
 */
static int load_super_ddf(struct supertype *st, int fd,
			  char *devname)
{
	unsigned long long dsize;
	struct ddf_super *super;
	int rv;

	if (get_dev_size(fd, devname, &dsize) == 0)
		return 1;

	if (!st->ignore_hw_compat && test_partition(fd))
		/* DDF is not allowed on partitions */
		return 1;

	/* 32M is a lower bound */
	if (dsize <= 32*1024*1024) {
		if (devname)
			pr_err("%s is too small for ddf: "
			       "size is %llu sectors.\n",
			       devname, dsize>>9);
		return 1;
	}
	if (dsize & 511) {
		if (devname)
			pr_err("%s is an odd size for ddf: "
			       "size is %llu bytes.\n",
			       devname, dsize);
		return 1;
	}

	/* discard any previously loaded metadata */
	free_super_ddf(st);

	if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
		pr_err("malloc of %zu failed.\n",
		       sizeof(*super));
		return 1;
	}
	memset(super, 0, sizeof(*super));

	rv = load_ddf_headers(fd, super, devname);
	if (rv) {
		free(super);
		return rv;
	}

	/* Have valid headers and have chosen the best. Let's read in the rest*/

	rv = load_ddf_global(fd, super, devname);

	if (rv) {
		if (devname)
			pr_err("Failed to load all information "
			       "sections on %s\n", devname);
		free(super);
		return rv;
	}

	rv = load_ddf_local(fd, super, devname, 0);

	if (rv) {
		if (devname)
			pr_err("Failed to load all information "
			       "sections on %s\n", devname);
		free(super);
		return rv;
	}

	/* Should possibly check the sections .... */

	st->sb = super;
	/* if called without a supertype, fill in the defaults */
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	return 0;

}
917
918 static void free_super_ddf(struct supertype *st)
919 {
920 struct ddf_super *ddf = st->sb;
921 if (ddf == NULL)
922 return;
923 free(ddf->phys);
924 free(ddf->virt);
925 while (ddf->conflist) {
926 struct vcl *v = ddf->conflist;
927 ddf->conflist = v->next;
928 if (v->block_sizes)
929 free(v->block_sizes);
930 if (v->other_bvds) {
931 int i;
932 for (i = 0; i < v->conf.sec_elmnt_count-1; i++)
933 if (v->other_bvds[i] != NULL)
934 free(v->other_bvds[i]);
935 free(v->other_bvds);
936 }
937 free(v);
938 }
939 while (ddf->dlist) {
940 struct dl *d = ddf->dlist;
941 ddf->dlist = d->next;
942 if (d->fd >= 0)
943 close(d->fd);
944 if (d->spare)
945 free(d->spare);
946 free(d);
947 }
948 while (ddf->add_list) {
949 struct dl *d = ddf->add_list;
950 ddf->add_list = d->next;
951 if (d->fd >= 0)
952 close(d->fd);
953 if (d->spare)
954 free(d->spare);
955 free(d);
956 }
957 free(ddf);
958 st->sb = NULL;
959 }
960
961 static struct supertype *match_metadata_desc_ddf(char *arg)
962 {
963 /* 'ddf' only support containers */
964 struct supertype *st;
965 if (strcmp(arg, "ddf") != 0 &&
966 strcmp(arg, "default") != 0
967 )
968 return NULL;
969
970 st = xcalloc(1, sizeof(*st));
971 st->ss = &super_ddf;
972 st->max_devs = 512;
973 st->minor_version = 0;
974 st->sb = NULL;
975 return st;
976 }
977
#ifndef MDASSEMBLE

/* Human-readable names for virtual_entry.state (low 3 bits). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* Names for virtual_entry.init_state (DDF_initstate_mask bits). */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the DDF_access_* bits of virtual_entry.init_state. */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Names for the primary RAID level (vd_config.prl). */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD",  DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Names for the secondary RAID level (vd_config.srl). */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
#endif
1029
/* A simple integer-to-integer mapping pair; tables are terminated by a
 * { MAXINT, MAXINT } entry. */
struct num_mapping {
	int num1, num2;
};
/* Map DDF primary RAID levels to Linux md levels (LEVEL_UNSUPPORTED
 * where md has no equivalent). */
static struct num_mapping ddf_level_num[] = {
	{ DDF_RAID0, 0 },
	{ DDF_RAID1, 1 },
	{ DDF_RAID3, LEVEL_UNSUPPORTED },
	{ DDF_RAID4, 4 },
	{ DDF_RAID5, 5 },
	{ DDF_RAID1E, LEVEL_UNSUPPORTED },
	{ DDF_JBOD, LEVEL_UNSUPPORTED },
	{ DDF_CONCAT, LEVEL_LINEAR },
	{ DDF_RAID5E, LEVEL_UNSUPPORTED },
	{ DDF_RAID5EE, LEVEL_UNSUPPORTED },
	{ DDF_RAID6, 6},
	{ MAXINT, MAXINT }
};
1047
1048 static int map_num1(struct num_mapping *map, int num)
1049 {
1050 int i;
1051 for (i=0 ; map[i].num1 != MAXINT; i++)
1052 if (map[i].num1 == num)
1053 break;
1054 return map[i].num2;
1055 }
1056
1057 static int all_ff(char *guid)
1058 {
1059 int i;
1060 for (i = 0; i < DDF_GUID_LEN; i++)
1061 if (guid[i] != (char)0xff)
1062 return 0;
1063 return 1;
1064 }
1065
1066 #ifndef MDASSEMBLE
/* Print a DDF GUID to stdout.
 * GUIDs are part (or all) ASCII and part binary, and tend to be space
 * padded.  The GUID is printed in hex (groups of 4 bytes separated by
 * ':'), then in parentheses any initial printable-ASCII prefix, and -
 * if 'tstamp' is set - a timestamp decoded from bytes 16-19 (which is
 * 1980-based, hence the DECADE adjustment). */
static void print_guid(char *guid, int tstamp)
{
	/* A GUIDs are part (or all) ASCII and part binary.
	 * They tend to be space padded.
	 * We print the GUID in HEX, then in parentheses add
	 * any initial ASCII sequence, and a possible
	 * time stamp from bytes 16-19
	 */
	int l = DDF_GUID_LEN;
	int i;

	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n                  (");
	/* trim trailing space padding */
	while (l && guid[l-1] == ' ')
		l--;
	/* print the leading run of printable ASCII, if any */
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1102
/* Print --examine details for the virtual disk identified by 'guid':
 * member-disk indices, chunk size, RAID level(s) and sizes.  'n' is the
 * VD index used to label each output line.  Config records with a bad
 * CRC or a different GUID are skipped. */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		if (calc_crc(vc, crl*512) != vc->crc)
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       __be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = __be16_to_cpu(sb->phys->used_pdes);
			/* map the member's refnum to a phys-disk index;
			 * "--" means the disk was not found */
			for (j=0; j<cnt; j++)
				if (vc->phys_refnum[i] == sb->phys->entries[j].refnum)
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift 255 means "not striped" */
		if (vc->chunk_shift != 255)
			printf("   Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf("   Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf("  Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf("  Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* blocks are 512-byte sectors; /2 converts to KiB */
		printf("  Device Size[%d] : %llu\n", n,
		       (unsigned long long)__be64_to_cpu(vc->blocks)/2);
		printf("   Array Size[%d] : %llu\n", n,
		       (unsigned long long)__be64_to_cpu(vc->array_blocks)/2);
	}
}
1150
/* Print a summary of every populated virtual-disk entry, followed by
 * the detailed per-VD information from examine_vd().
 */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = __be16_to_cpu(sb->virt->populated_vdes);
	int i;
	printf(" Virtual Disks : %d\n", cnt);

	for (i=0; i<cnt; i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
		/* low 3 bits: state; bit 3: morphing; bit 4: not consistent */
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & 8) ? "Morphing, ": "",
		       (ve->state & 16)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&3));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state>>6) & 3));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1176
/* Print one line per physical disk: index, refnum, size, device name
 * (when a matching entry exists in sb->dlist) and type/state flags.
 */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = __be16_to_cpu(sb->phys->used_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = __be16_to_cpu(pd->type);
		int state = __be16_to_cpu(pd->state);

		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       __be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 gives KiB */
		printf("%8lluK ",
		       (unsigned long long)__be64_to_cpu(pd->config_size)>>1);
		/* try to find the local device with this refnum */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (dl->disk.refnum == pd->refnum) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1227
/* Top-level "--examine" output for a DDF container: header fields,
 * then the virtual- and physical-disk tables.
 */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
	/* a valid magic in 'secondary' means a redundant header was read */
	printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
	       ?"yes" : "no");
	examine_vds(sb);
	examine_pds(sb);
}
1244
1245 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1246
1247 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1248 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1249
1250 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1251 {
1252 /*
1253 * Figure out the VD number for this supertype.
1254 * Returns DDF_CONTAINER for the container itself,
1255 * and DDF_NOTFOUND on error.
1256 */
1257 struct ddf_super *ddf = st->sb;
1258 struct mdinfo *sra;
1259 char *sub, *end;
1260 unsigned int vcnum;
1261
1262 if (*st->container_devnm == '\0')
1263 return DDF_CONTAINER;
1264
1265 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1266 if (!sra || sra->array.major_version != -1 ||
1267 sra->array.minor_version != -2 ||
1268 !is_subarray(sra->text_version))
1269 return DDF_NOTFOUND;
1270
1271 sub = strchr(sra->text_version + 1, '/');
1272 if (sub != NULL)
1273 vcnum = strtoul(sub + 1, &end, 10);
1274 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1275 vcnum >= __be16_to_cpu(ddf->active->max_vd_entries))
1276 return DDF_NOTFOUND;
1277
1278 return vcnum;
1279 }
1280
1281 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1282 {
1283 /* We just write a generic DDF ARRAY entry
1284 */
1285 struct mdinfo info;
1286 char nbuf[64];
1287 getinfo_super_ddf(st, &info, NULL);
1288 fname_from_uuid(st, &info, nbuf, ':');
1289
1290 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1291 }
1292
1293 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1294 {
1295 /* We just write a generic DDF ARRAY entry
1296 */
1297 struct ddf_super *ddf = st->sb;
1298 struct mdinfo info;
1299 unsigned int i;
1300 char nbuf[64];
1301 getinfo_super_ddf(st, &info, NULL);
1302 fname_from_uuid(st, &info, nbuf, ':');
1303
1304 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
1305 struct virtual_entry *ve = &ddf->virt->entries[i];
1306 struct vcl vcl;
1307 char nbuf1[64];
1308 if (all_ff(ve->guid))
1309 continue;
1310 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1311 ddf->currentconf =&vcl;
1312 uuid_from_super_ddf(st, info.uuid);
1313 fname_from_uuid(st, &info, nbuf1, ':');
1314 printf("ARRAY container=%s member=%d UUID=%s\n",
1315 nbuf+5, i, nbuf1+5);
1316 }
1317 }
1318
1319 static void export_examine_super_ddf(struct supertype *st)
1320 {
1321 struct mdinfo info;
1322 char nbuf[64];
1323 getinfo_super_ddf(st, &info, NULL);
1324 fname_from_uuid(st, &info, nbuf, ':');
1325 printf("MD_METADATA=ddf\n");
1326 printf("MD_LEVEL=container\n");
1327 printf("MD_UUID=%s\n", nbuf+5);
1328 }
1329
/* Copy the DDF metadata area (anchor plus the primary/secondary
 * headers and everything they describe) verbatim from fd 'from' to
 * fd 'to'.  Returns 0 on success, 1 on any failure.
 */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* read and validate the anchor: last sector of the device */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (ddf->magic != DDF_HEADER_MAGIC ||
	    calc_crc(ddf, 512) != ddf->crc ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* start at whichever of anchor/primary/secondary is earliest */
	offset = dsize - 512;
	if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = __be64_to_cpu(ddf->primary_lba) << 9;
	if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = __be64_to_cpu(ddf->secondary_lba) << 9;

	/* NOTE(review): 'bytes' is an int - assumes the metadata area is
	 * well under 2GB; confirm this holds for corrupt metadata too */
	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* copy to the same offset on the target, 4K at a time */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1391
/* Intentionally empty for now.
 * FIXME: this could print the DDF GUID, but we would need to work
 * out which array is meant: for the whole container, briefly list
 * all arrays; for a single member, give its name.
 */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
}
1401
/* Print just " UUID=..." for this device: for a subarray the UUID is
 * derived from its VD GUID, for the container from the anchor GUID.
 * Prints nothing when the VD number cannot be determined.
 */
static void brief_detail_super_ddf(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	struct ddf_super *ddf = st->sb;
	unsigned int vcnum = get_vd_num_of_subarray(st);
	if (vcnum == DDF_CONTAINER)
		uuid_from_super_ddf(st, info.uuid);
	else if (vcnum == DDF_NOTFOUND)
		return;
	else
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
	/* NOTE(review): only info.uuid is initialised here - presumably
	 * fname_from_uuid() reads nothing else; confirm. */
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
1417 #endif
1418
1419 static int match_home_ddf(struct supertype *st, char *homehost)
1420 {
1421 /* It matches 'this' host if the controller is a
1422 * Linux-MD controller with vendor_data matching
1423 * the hostname
1424 */
1425 struct ddf_super *ddf = st->sb;
1426 unsigned int len;
1427
1428 if (!homehost)
1429 return 0;
1430 len = strlen(homehost);
1431
1432 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1433 len < sizeof(ddf->controller.vendor_data) &&
1434 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1435 ddf->controller.vendor_data[len] == 0);
1436 }
1437
1438 #ifndef MDASSEMBLE
1439 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst)
1440 {
1441 struct vcl *v;
1442
1443 for (v = ddf->conflist; v; v = v->next)
1444 if (inst == v->vcnum)
1445 return &v->conf;
1446 return NULL;
1447 }
1448 #endif
1449
1450 static int find_phys(struct ddf_super *ddf, __u32 phys_refnum)
1451 {
1452 /* Find the entry in phys_disk which has the given refnum
1453 * and return it's index
1454 */
1455 unsigned int i;
1456 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1457 if (ddf->phys->entries[i].refnum == phys_refnum)
1458 return i;
1459 return -1;
1460 }
1461
1462 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1463 {
1464 char buf[20];
1465 struct sha1_ctx ctx;
1466 sha1_init_ctx(&ctx);
1467 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1468 sha1_finish_ctx(&ctx, buf);
1469 memcpy(uuid, buf, 4*4);
1470 }
1471
1472 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1473 {
1474 /* The uuid returned here is used for:
1475 * uuid to put into bitmap file (Create, Grow)
1476 * uuid for backup header when saving critical section (Grow)
1477 * comparing uuids when re-adding a device into an array
1478 * In these cases the uuid required is that of the data-array,
1479 * not the device-set.
1480 * uuid to recognise same set when adding a missing device back
1481 * to an array. This is a uuid for the device-set.
1482 *
1483 * For each of these we can make do with a truncated
1484 * or hashed uuid rather than the original, as long as
1485 * everyone agrees.
1486 * In the case of SVD we assume the BVD is of interest,
1487 * though that might be the case if a bitmap were made for
1488 * a mirrored SVD - worry about that later.
1489 * So we need to find the VD configuration record for the
1490 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1491 * The first 16 bytes of the sha1 of these is used.
1492 */
1493 struct ddf_super *ddf = st->sb;
1494 struct vcl *vcl = ddf->currentconf;
1495 char *guid;
1496
1497 if (vcl)
1498 guid = vcl->conf.guid;
1499 else
1500 guid = ddf->anchor.guid;
1501 uuid_from_ddf_guid(guid, uuid);
1502 }
1503
1504 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1505
1506 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1507 {
1508 struct ddf_super *ddf = st->sb;
1509 int map_disks = info->array.raid_disks;
1510 __u32 *cptr;
1511
1512 if (ddf->currentconf) {
1513 getinfo_super_ddf_bvd(st, info, map);
1514 return;
1515 }
1516 memset(info, 0, sizeof(*info));
1517
1518 info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
1519 info->array.level = LEVEL_CONTAINER;
1520 info->array.layout = 0;
1521 info->array.md_minor = -1;
1522 cptr = (__u32 *)(ddf->anchor.guid + 16);
1523 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1524
1525 info->array.utime = 0;
1526 info->array.chunk_size = 0;
1527 info->container_enough = 1;
1528
1529 info->disk.major = 0;
1530 info->disk.minor = 0;
1531 if (ddf->dlist) {
1532 info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
1533 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1534
1535 info->data_offset = __be64_to_cpu(ddf->phys->
1536 entries[info->disk.raid_disk].
1537 config_size);
1538 info->component_size = ddf->dlist->size - info->data_offset;
1539 } else {
1540 info->disk.number = -1;
1541 info->disk.raid_disk = -1;
1542 // info->disk.raid_disk = find refnum in the table and use index;
1543 }
1544 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1545
1546 info->recovery_start = MaxSector;
1547 info->reshape_active = 0;
1548 info->recovery_blocked = 0;
1549 info->name[0] = 0;
1550
1551 info->array.major_version = -1;
1552 info->array.minor_version = -2;
1553 strcpy(info->text_version, "ddf");
1554 info->safe_mode_delay = 0;
1555
1556 uuid_from_super_ddf(st, info->uuid);
1557
1558 if (map) {
1559 int i;
1560 for (i = 0 ; i < map_disks; i++) {
1561 if (i < info->array.raid_disks &&
1562 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1563 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1564 map[i] = 1;
1565 else
1566 map[i] = 0;
1567 }
1568 }
1569 }
1570
1571 static int rlq_to_layout(int rlq, int prl, int raiddisks);
1572
1573 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1574 {
1575 struct ddf_super *ddf = st->sb;
1576 struct vcl *vc = ddf->currentconf;
1577 int cd = ddf->currentdev;
1578 int j;
1579 struct dl *dl;
1580 int map_disks = info->array.raid_disks;
1581 __u32 *cptr;
1582
1583 memset(info, 0, sizeof(*info));
1584 /* FIXME this returns BVD info - what if we want SVD ?? */
1585
1586 info->array.raid_disks = __be16_to_cpu(vc->conf.prim_elmnt_count);
1587 info->array.level = map_num1(ddf_level_num, vc->conf.prl);
1588 info->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
1589 info->array.raid_disks);
1590 info->array.md_minor = -1;
1591 cptr = (__u32 *)(vc->conf.guid + 16);
1592 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1593 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1594 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1595 info->custom_array_size = 0;
1596
1597 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1598 info->data_offset = __be64_to_cpu(vc->lba_offset[cd]);
1599 if (vc->block_sizes)
1600 info->component_size = vc->block_sizes[cd];
1601 else
1602 info->component_size = __be64_to_cpu(vc->conf.blocks);
1603 }
1604
1605 for (dl = ddf->dlist; dl ; dl = dl->next)
1606 if (dl->raiddisk == ddf->currentdev)
1607 break;
1608
1609 info->disk.major = 0;
1610 info->disk.minor = 0;
1611 info->disk.state = 0;
1612 if (dl) {
1613 info->disk.major = dl->major;
1614 info->disk.minor = dl->minor;
1615 info->disk.raid_disk = dl->raiddisk;
1616 info->disk.number = dl->pdnum;
1617 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1618 }
1619
1620 info->container_member = ddf->currentconf->vcnum;
1621
1622 info->recovery_start = MaxSector;
1623 info->resync_start = 0;
1624 info->reshape_active = 0;
1625 info->recovery_blocked = 0;
1626 if (!(ddf->virt->entries[info->container_member].state
1627 & DDF_state_inconsistent) &&
1628 (ddf->virt->entries[info->container_member].init_state
1629 & DDF_initstate_mask)
1630 == DDF_init_full)
1631 info->resync_start = MaxSector;
1632
1633 uuid_from_super_ddf(st, info->uuid);
1634
1635 info->array.major_version = -1;
1636 info->array.minor_version = -2;
1637 sprintf(info->text_version, "/%s/%d",
1638 st->container_devnm,
1639 info->container_member);
1640 info->safe_mode_delay = 200;
1641
1642 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1643 info->name[16]=0;
1644 for(j=0; j<16; j++)
1645 if (info->name[j] == ' ')
1646 info->name[j] = 0;
1647
1648 if (map)
1649 for (j = 0; j < map_disks; j++) {
1650 map[j] = 0;
1651 if (j < info->array.raid_disks) {
1652 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1653 if (i >= 0 &&
1654 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1655 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1656 map[i] = 1;
1657 }
1658 }
1659 }
1660
/* For 'assemble' and 'force' we need to return non-zero if any
 * change was made.  For others the return value is ignored.
 * Supported updates:
 *   grow              - array gained a new device (linear only); FIXME
 *   resync            - mark dirty so a resync will happen; FIXME
 *   _reshape_progress - reshape is not supported yet, nothing to do
 *   assemble          - nothing needed to 'trick' the kernel; once the
 *                       metadata is updated to activate the array, the
 *                       implied modifications just happen
 * Unsupported here (return -1):
 *   homehost - stored in controller->vendor_data (when we are the vendor)
 *   name     - stored in virtual_entry->name, preserving the homehost
 *   uuid     - change uuid of the array
 * Not relevant for this metadata version: sparc2.2, super-minor,
 * summaries; "force-*" also falls through to -1.
 */
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	if (strcmp(update, "grow") == 0)
		return 0;	/* FIXME */
	if (strcmp(update, "resync") == 0)
		return 0;	/* FIXME: should mark dirty */
	if (strcmp(update, "_reshape_progress") == 0)
		return 0;	/* We don't support reshape yet */
	if (strcmp(update, "assemble") == 0)
		return 0;	/* Do nothing, just succeed */
	return -1;		/* includes "homehost" and "name" */
}
1729
1730 static void make_header_guid(char *guid)
1731 {
1732 __u32 stamp;
1733 /* Create a DDF Header of Virtual Disk GUID */
1734
1735 /* 24 bytes of fiction required.
1736 * first 8 are a 'vendor-id' - "Linux-MD"
1737 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
1738 * Remaining 8 random number plus timestamp
1739 */
1740 memcpy(guid, T10, sizeof(T10));
1741 stamp = __cpu_to_be32(0xdeadbeef);
1742 memcpy(guid+8, &stamp, 4);
1743 stamp = __cpu_to_be32(0);
1744 memcpy(guid+12, &stamp, 4);
1745 stamp = __cpu_to_be32(time(0) - DECADE);
1746 memcpy(guid+16, &stamp, 4);
1747 stamp = random32();
1748 memcpy(guid+20, &stamp, 4);
1749 }
1750
1751 static int init_super_ddf_bvd(struct supertype *st,
1752 mdu_array_info_t *info,
1753 unsigned long long size,
1754 char *name, char *homehost,
1755 int *uuid, unsigned long long data_offset);
1756
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *                    this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *                         leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *                         This just lists VDs, doesn't give details
	 *  Config records - describes the VDs that use this disk
	 *  DiskData - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure.
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;	/* running offset of next section */
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	/* DDF cannot place data at an arbitrary offset */
	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* An existing superblock means we are creating a member array
	 * (BVD) inside that container instead of a fresh container */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	ddf->dlist = NULL; /* no physical disks yet */
	ddf->conflist = NULL; /* No virtual disks yet */
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf. So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device. To just set up the lengths.
	 *
	 */

	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = __cpu_to_be32(1);
	ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	/* real LBAs are filled in later, once device sizes are known */
	ddf->anchor.primary_lba = ~(__u64)0;
	ddf->anchor.secondary_lba = ~(__u64)0;
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
	ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
						  of 32M reserved.. */
	max_phys_disks = 1023; /* Should be enough */
	ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
	max_virt_disks = 255;
	ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
	ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
	ddf->max_part = 64;
	ddf->mppe = 256;
	/* one header sector plus room for mppe (refnum,lba) pairs */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* controller sections is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
	ddf->anchor.controller_section_length = __cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* only these section sizes (in sectors) are acceptable */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		__cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		__cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* room for one config record per partition, plus one */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = __cpu_to_be32(sector);
	ddf->anchor.config_section_length = __cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = __cpu_to_be32(sector);
	ddf->anchor.data_section_length = __cpu_to_be32(1);
	sector += 1;

	/* optional sections we do not use */
	ddf->anchor.bbm_section_length = __cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = __cpu_to_be32(0);
	ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = __cpu_to_be32(0);
	ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* primary and secondary headers start as copies of the anchor */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id' - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	/* space-pad between the vendor id and the hostname */
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = 0;
	ddf->controller.type.sub_device_id = 0;
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	/* pad bytes 0xff, structure fields zeroed */
	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = __cpu_to_be16(0);
	pd->max_pdes = __cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);

	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = __cpu_to_be16(0);
	vd->max_vdes = __cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	/* all VD slots start unused: all-0xff GUIDs (see all_ff()) */
	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
1976
/* Convert a chunk size in bytes to the DDF chunk shift: the bit
 * position of the size expressed in 512-byte sectors (log2 for
 * power-of-two sizes).  Returns -1 for sizes below 512.
 */
static int chunk_to_shift(int chunksize)
{
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
1981
1982 static int level_to_prl(int level)
1983 {
1984 switch (level) {
1985 case LEVEL_LINEAR: return DDF_CONCAT;
1986 case 0: return DDF_RAID0;
1987 case 1: return DDF_RAID1;
1988 case 4: return DDF_RAID4;
1989 case 5: return DDF_RAID5;
1990 case 6: return DDF_RAID6;
1991 default: return -1;
1992 }
1993 }
1994
1995 static int layout_to_rlq(int level, int layout, int raiddisks)
1996 {
1997 switch(level) {
1998 case 0:
1999 return DDF_RAID0_SIMPLE;
2000 case 1:
2001 switch(raiddisks) {
2002 case 2: return DDF_RAID1_SIMPLE;
2003 case 3: return DDF_RAID1_MULTI;
2004 default: return -1;
2005 }
2006 case 4:
2007 switch(layout) {
2008 case 0: return DDF_RAID4_N;
2009 }
2010 break;
2011 case 5:
2012 switch(layout) {
2013 case ALGORITHM_LEFT_ASYMMETRIC:
2014 return DDF_RAID5_N_RESTART;
2015 case ALGORITHM_RIGHT_ASYMMETRIC:
2016 return DDF_RAID5_0_RESTART;
2017 case ALGORITHM_LEFT_SYMMETRIC:
2018 return DDF_RAID5_N_CONTINUE;
2019 case ALGORITHM_RIGHT_SYMMETRIC:
2020 return -1; /* not mentioned in standard */
2021 }
2022 case 6:
2023 switch(layout) {
2024 case ALGORITHM_ROTATING_N_RESTART:
2025 return DDF_RAID5_N_RESTART;
2026 case ALGORITHM_ROTATING_ZERO_RESTART:
2027 return DDF_RAID6_0_RESTART;
2028 case ALGORITHM_ROTATING_N_CONTINUE:
2029 return DDF_RAID5_N_CONTINUE;
2030 }
2031 }
2032 return -1;
2033 }
2034
2035 static int rlq_to_layout(int rlq, int prl, int raiddisks)
2036 {
2037 switch(prl) {
2038 case DDF_RAID0:
2039 return 0; /* hopefully rlq == DDF_RAID0_SIMPLE */
2040 case DDF_RAID1:
2041 return 0; /* hopefully rlq == SIMPLE or MULTI depending
2042 on raiddisks*/
2043 case DDF_RAID4:
2044 switch(rlq) {
2045 case DDF_RAID4_N:
2046 return 0;
2047 default:
2048 /* not supported */
2049 return -1; /* FIXME this isn't checked */
2050 }
2051 case DDF_RAID5:
2052 switch(rlq) {
2053 case DDF_RAID5_N_RESTART:
2054 return ALGORITHM_LEFT_ASYMMETRIC;
2055 case DDF_RAID5_0_RESTART:
2056 return ALGORITHM_RIGHT_ASYMMETRIC;
2057 case DDF_RAID5_N_CONTINUE:
2058 return ALGORITHM_LEFT_SYMMETRIC;
2059 default:
2060 return -1;
2061 }
2062 case DDF_RAID6:
2063 switch(rlq) {
2064 case DDF_RAID5_N_RESTART:
2065 return ALGORITHM_ROTATING_N_RESTART;
2066 case DDF_RAID6_0_RESTART:
2067 return ALGORITHM_ROTATING_ZERO_RESTART;
2068 case DDF_RAID5_N_CONTINUE:
2069 return ALGORITHM_ROTATING_N_CONTINUE;
2070 default:
2071 return -1;
2072 }
2073 }
2074 return -1;
2075 }
2076
2077 #ifndef MDASSEMBLE
/* A used region of a physical device, in sectors. */
struct extent {
	unsigned long long start, size;
};

/* qsort() comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *lhs = av;
	const struct extent *rhs = bv;

	return (lhs->start > rhs->start) - (lhs->start < rhs->start);
}
2091
2092 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2093 {
2094 /* find a list of used extents on the give physical device
2095 * (dnum) of the given ddf.
2096 * Return a malloced array of 'struct extent'
2097
2098 * FIXME ignore DDF_Legacy devices?
2099
2100 */
2101 struct extent *rv;
2102 int n = 0;
2103 unsigned int i, j;
2104
2105 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2106
2107 for (i = 0; i < ddf->max_part; i++) {
2108 struct vcl *v = dl->vlist[i];
2109 if (v == NULL)
2110 continue;
2111 for (j = 0; j < v->conf.prim_elmnt_count; j++)
2112 if (v->conf.phys_refnum[j] == dl->disk.refnum) {
2113 /* This device plays role 'j' in 'v'. */
2114 rv[n].start = __be64_to_cpu(v->lba_offset[j]);
2115 rv[n].size = __be64_to_cpu(v->conf.blocks);
2116 n++;
2117 break;
2118 }
2119 }
2120 qsort(rv, n, sizeof(*rv), cmp_extent);
2121
2122 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2123 rv[n].size = 0;
2124 return rv;
2125 }
2126 #endif
2127
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 *
	 * Returns 1 on success, 0 on failure (table full, duplicate name,
	 * corrupt virtual-disk table, or allocation failure).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	/* Refuse if the virtual-disk table is already full.
	 * On-disk counters are big-endian, hence the conversions. */
	if (__be16_to_cpu(ddf->virt->populated_vdes)
	    >= __be16_to_cpu(ddf->virt->max_vdes)) {
		pr_err("This ddf already has the "
		       "maximum of %d virtual devices\n",
		       __be16_to_cpu(ddf->virt->max_vdes));
		return 0;
	}

	/* Reject a duplicate array name (names are fixed 16-byte fields,
	 * space padded, hence strncmp with 16). */
	if (name)
		for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
			if (!all_ff(ddf->virt->entries[venum].guid)) {
				char *n = ddf->virt->entries[venum].name;

				if (strncmp(name, n, 16) == 0) {
					pr_err("This ddf already"
					       " has an array called %s\n",
					       name);
					return 0;
				}
			}

	/* Find a free slot: an entry whose GUID is all 0xff. */
	for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
		if (all_ff(ddf->virt->entries[venum].guid))
			break;
	if (venum == __be16_to_cpu(ddf->virt->max_vdes)) {
		pr_err("Cannot find spare slot for "
		       "virtual disk - DDF is corrupt\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = __cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
	ve->type = 0;
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* Name field is space-padded, not NUL-terminated. */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		__cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	/* 512-aligned because the record is later written with sector I/O. */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	/* lba_offset array lives immediately after phys_refnum[] within
	 * the on-disk record. */
	vcl->lba_offset = (__u64*) &vcl->conf.phys_refnum[ddf->mppe];
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vcl->other_bvds = NULL;

	vc = &vcl->conf;

	/* Fill in the on-disk vd_config record; multi-byte fields are
	 * big-endian, pad areas are 0xff per the DDF spec. */
	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	/* DDF timestamps count from 1980; DECADE converts from Unix time. */
	vc->timestamp = __cpu_to_be32(time(0)-DECADE);
	vc->seqnum = __cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->prim_elmnt_count = __cpu_to_be16(info->raid_disks);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	vc->prl = level_to_prl(info->level);
	vc->rlq = layout_to_rlq(info->level, info->layout, info->raid_disks);
	/* single BVD — no secondary RAID level */
	vc->sec_elmnt_count = 1;
	vc->sec_elmnt_seq = 0;
	vc->srl = 0;
	/* sizes are in 512-byte sectors; info->size is in KiB, so *2 */
	vc->blocks = __cpu_to_be64(info->size * 2);
	vc->array_blocks = __cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	vc->spare_refs[0] = 0xffffffff;
	vc->spare_refs[1] = 0xffffffff;
	vc->spare_refs[2] = 0xffffffff;
	vc->spare_refs[3] = 0xffffffff;
	vc->spare_refs[4] = 0xffffffff;
	vc->spare_refs[5] = 0xffffffff;
	vc->spare_refs[6] = 0xffffffff;
	vc->spare_refs[7] = 0xffffffff;
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* No members yet: refnums all 0xff (unused), lba_offsets zeroed. */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* Link the new config in and make it the "current" one so that
	 * add_to_super can populate it. */
	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2254
2255 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device with-in the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Returns nothing; on any failure (device not found, no space,
	 * no free partition slot) the function simply bails out.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	__u64 *lba_offset;
	unsigned int working;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;

	/* Locate the member device, either by the raid_disk chosen
	 * earlier (fd == -1) or by major/minor. */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	lba_offset = ddf->currentconf->lba_offset;

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	/* First-fit: walk the gaps between used extents until one at
	 * least 'blocks' sectors long is found.  The list from
	 * get_extents() is terminated by an entry with size == 0. */
	i = 0; pos = 0;
	blocks = __be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	/* Record this device's role and data offset in the new BVD. */
	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[dk->raid_disk] = dl->disk.refnum;
	lba_offset[dk->raid_disk] = __cpu_to_be64(pos);

	/* Attach the config record to a free partition slot on the
	 * device; give up if all slots are taken. */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check how many working raid_disks, and if we can mark
	 * array as optimal yet
	 */
	working = 0;

	for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++)
		if (vc->phys_refnum[i] != 0xffffffff)
			working++;

	/* Find which virtual_entry */
	i = ddf->currentconf->vcnum;
	if (working == __be16_to_cpu(vc->prim_elmnt_count))
		ddf->virt->entries[i].state =
			(ddf->virt->entries[i].state & ~DDF_state_mask)
			| DDF_state_optimal;

	/* RAID6 with exactly one member missing is "partially optimal". */
	if (vc->prl == DDF_RAID6 &&
	    working+1 == __be16_to_cpu(vc->prim_elmnt_count))
		ddf->virt->entries[i].state =
			(ddf->virt->entries[i].state & ~DDF_state_mask)
			| DDF_state_part_optimal;

	/* The device is now part of a VD rather than a global spare. */
	ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
	ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
	ddf_set_updates_pending(ddf);
}
2357
2358 /* add a device to a container, either while creating it or while
2359 * expanding a pre-existing container
2360 */
2361 static int add_to_super_ddf(struct supertype *st,
2362 mdu_disk_info_t *dk, int fd, char *devname,
2363 unsigned long long data_offset)
2364 {
2365 struct ddf_super *ddf = st->sb;
2366 struct dl *dd;
2367 time_t now;
2368 struct tm *tm;
2369 unsigned long long size;
2370 struct phys_disk_entry *pde;
2371 unsigned int n, i;
2372 struct stat stb;
2373 __u32 *tptr;
2374
2375 if (ddf->currentconf) {
2376 add_to_super_ddf_bvd(st, dk, fd, devname);
2377 return 0;
2378 }
2379
2380 /* This is device numbered dk->number. We need to create
2381 * a phys_disk entry and a more detailed disk_data entry.
2382 */
2383 fstat(fd, &stb);
2384 if (posix_memalign((void**)&dd, 512,
2385 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2386 pr_err("%s could allocate buffer for new disk, aborting\n",
2387 __func__);
2388 return 1;
2389 }
2390 dd->major = major(stb.st_rdev);
2391 dd->minor = minor(stb.st_rdev);
2392 dd->devname = devname;
2393 dd->fd = fd;
2394 dd->spare = NULL;
2395
2396 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2397 now = time(0);
2398 tm = localtime(&now);
2399 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2400 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2401 tptr = (__u32 *)(dd->disk.guid + 16);
2402 *tptr++ = random32();
2403 *tptr = random32();
2404
2405 do {
2406 /* Cannot be bothered finding a CRC of some irrelevant details*/
2407 dd->disk.refnum = random32();
2408 for (i = __be16_to_cpu(ddf->active->max_pd_entries);
2409 i > 0; i--)
2410 if (ddf->phys->entries[i-1].refnum == dd->disk.refnum)
2411 break;
2412 } while (i > 0);
2413
2414 dd->disk.forced_ref = 1;
2415 dd->disk.forced_guid = 1;
2416 memset(dd->disk.vendor, ' ', 32);
2417 memcpy(dd->disk.vendor, "Linux", 5);
2418 memset(dd->disk.pad, 0xff, 442);
2419 for (i = 0; i < ddf->max_part ; i++)
2420 dd->vlist[i] = NULL;
2421
2422 n = __be16_to_cpu(ddf->phys->used_pdes);
2423 pde = &ddf->phys->entries[n];
2424 dd->pdnum = n;
2425
2426 if (st->update_tail) {
2427 int len = (sizeof(struct phys_disk) +
2428 sizeof(struct phys_disk_entry));
2429 struct phys_disk *pd;
2430
2431 pd = xmalloc(len);
2432 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2433 pd->used_pdes = __cpu_to_be16(n);
2434 pde = &pd->entries[0];
2435 dd->mdupdate = pd;
2436 } else {
2437 n++;
2438 ddf->phys->used_pdes = __cpu_to_be16(n);
2439 }
2440
2441 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2442 pde->refnum = dd->disk.refnum;
2443 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2444 pde->state = __cpu_to_be16(DDF_Online);
2445 get_dev_size(fd, NULL, &size);
2446 /* We are required to reserve 32Meg, and record the size in sectors */
2447 pde->config_size = __cpu_to_be64( (size - 32*1024*1024) / 512);
2448 sprintf(pde->path, "%17.17s","Information: nil") ;
2449 memset(pde->pad, 0xff, 6);
2450
2451 dd->size = size >> 9;
2452 if (st->update_tail) {
2453 dd->next = ddf->add_list;
2454 ddf->add_list = dd;
2455 } else {
2456 dd->next = ddf->dlist;
2457 ddf->dlist = dd;
2458 ddf_set_updates_pending(ddf);
2459 }
2460
2461 return 0;
2462 }
2463
2464 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2465 {
2466 struct ddf_super *ddf = st->sb;
2467 struct dl *dl;
2468
2469 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2470 * disappeared from the container.
2471 * We need to arrange that it disappears from the metadata and
2472 * internal data structures too.
2473 * Most of the work is done by ddf_process_update which edits
2474 * the metadata and closes the file handle and attaches the memory
2475 * where free_updates will free it.
2476 */
2477 for (dl = ddf->dlist; dl ; dl = dl->next)
2478 if (dl->major == dk->major &&
2479 dl->minor == dk->minor)
2480 break;
2481 if (!dl)
2482 return -1;
2483
2484 if (st->update_tail) {
2485 int len = (sizeof(struct phys_disk) +
2486 sizeof(struct phys_disk_entry));
2487 struct phys_disk *pd;
2488
2489 pd = xmalloc(len);
2490 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2491 pd->used_pdes = __cpu_to_be16(dl->pdnum);
2492 pd->entries[0].state = __cpu_to_be16(DDF_Missing);
2493 append_metadata_update(st, pd, len);
2494 }
2495 return 0;
2496 }
2497
2498 /*
2499 * This is the write_init_super method for a ddf container. It is
2500 * called when creating a container or adding another device to a
2501 * container.
2502 */
2503 #define NULL_CONF_SZ 4096
2504
2505 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
2506 __u32 refnum, unsigned int nmax,
2507 const struct vd_config **bvd,
2508 unsigned int *idx);
2509
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
				 char *null_aligned)
{
	/* Write one complete copy of the DDF structure (header, controller
	 * data, phys/virt records, config records, disk data) to device
	 * 'd' at the location selected by 'type' (primary or secondary).
	 * 'null_aligned' is an NULL_CONF_SZ buffer of 0xff used to pad
	 * unused config slots.
	 * Returns 1 on success, 0 on any write failure or unknown type.
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size;

	fd = d->fd;

	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = __be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = __be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}

	/* CRC is computed with openflag cleared. */
	header->type = type;
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	/* All records are laid out sequentially after the header. */
	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		return 0;

	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		return 0;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		return 0;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		return 0;

	/* Now write lots of config records: one slot per partition plus a
	 * final slot for the spare assignment record.  Empty slots are
	 * filled with 0xff padding. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* last slot: spare assignment record, if any */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			/* NOTE(review): if d->disk.refnum were absent from
			 * 'c', vdc would stay NULL while c is set and the
			 * vdc->seqnum below would dereference NULL —
			 * presumably a listed vcl always contains this
			 * device's refnum; verify. */
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (c) {
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			if (write(fd, vdc, conf_size) < 0)
				break;
		} else {
			/* pad an empty slot in NULL_CONF_SZ-sized pieces */
			unsigned int togo = conf_size;
			while (togo > NULL_CONF_SZ) {
				if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
					break;
				togo -= NULL_CONF_SZ;
			}
			if (write(fd, null_aligned, togo) < 0)
				break;
		}
	}
	/* The loop only breaks early on a write failure, so reaching
	 * i <= n_config here means not everything was written. */
	if (i <= n_config)
		return 0;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		return 0;

	return 1;
}
2596
2597 static int __write_init_super_ddf(struct supertype *st)
2598 {
2599 struct ddf_super *ddf = st->sb;
2600 struct dl *d;
2601 int attempts = 0;
2602 int successes = 0;
2603 unsigned long long size;
2604 char *null_aligned;
2605 __u32 seq;
2606
2607 pr_state(ddf, __func__);
2608 if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
2609 return -ENOMEM;
2610 }
2611 memset(null_aligned, 0xff, NULL_CONF_SZ);
2612
2613 seq = ddf->active->seq + 1;
2614
2615 /* try to write updated metadata,
2616 * if we catch a failure move on to the next disk
2617 */
2618 for (d = ddf->dlist; d; d=d->next) {
2619 int fd = d->fd;
2620
2621 if (fd < 0)
2622 continue;
2623
2624 attempts++;
2625 /* We need to fill in the primary, (secondary) and workspace
2626 * lba's in the headers, set their checksums,
2627 * Also checksum phys, virt....
2628 *
2629 * Then write everything out, finally the anchor is written.
2630 */
2631 get_dev_size(fd, NULL, &size);
2632 size /= 512;
2633 if (d->workspace_lba != 0)
2634 ddf->anchor.workspace_lba = d->workspace_lba;
2635 else
2636 ddf->anchor.workspace_lba =
2637 __cpu_to_be64(size - 32*1024*2);
2638 if (d->primary_lba != 0)
2639 ddf->anchor.primary_lba = d->primary_lba;
2640 else
2641 ddf->anchor.primary_lba =
2642 __cpu_to_be64(size - 16*1024*2);
2643 if (d->secondary_lba != 0)
2644 ddf->anchor.secondary_lba = d->secondary_lba;
2645 else
2646 ddf->anchor.secondary_lba =
2647 __cpu_to_be64(size - 32*1024*2);
2648 ddf->anchor.seq = seq;
2649 memcpy(&ddf->primary, &ddf->anchor, 512);
2650 memcpy(&ddf->secondary, &ddf->anchor, 512);
2651
2652 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2653 ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
2654 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2655
2656 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
2657 null_aligned))
2658 continue;
2659
2660 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
2661 null_aligned))
2662 continue;
2663
2664 lseek64(fd, (size-1)*512, SEEK_SET);
2665 if (write(fd, &ddf->anchor, 512) < 0)
2666 continue;
2667 successes++;
2668 }
2669 free(null_aligned);
2670
2671 return attempts != successes;
2672 }
2673
2674 static int write_init_super_ddf(struct supertype *st)
2675 {
2676 struct ddf_super *ddf = st->sb;
2677 struct vcl *currentconf = ddf->currentconf;
2678
2679 /* we are done with currentconf reset it to point st at the container */
2680 ddf->currentconf = NULL;
2681
2682 if (st->update_tail) {
2683 /* queue the virtual_disk and vd_config as metadata updates */
2684 struct virtual_disk *vd;
2685 struct vd_config *vc;
2686 int len;
2687
2688 if (!currentconf) {
2689 int len = (sizeof(struct phys_disk) +
2690 sizeof(struct phys_disk_entry));
2691
2692 /* adding a disk to the container. */
2693 if (!ddf->add_list)
2694 return 0;
2695
2696 append_metadata_update(st, ddf->add_list->mdupdate, len);
2697 ddf->add_list->mdupdate = NULL;
2698 return 0;
2699 }
2700
2701 /* Newly created VD */
2702
2703 /* First the virtual disk. We have a slightly fake header */
2704 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2705 vd = xmalloc(len);
2706 *vd = *ddf->virt;
2707 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
2708 vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
2709 append_metadata_update(st, vd, len);
2710
2711 /* Then the vd_config */
2712 len = ddf->conf_rec_len * 512;
2713 vc = xmalloc(len);
2714 memcpy(vc, &currentconf->conf, len);
2715 append_metadata_update(st, vc, len);
2716
2717 /* FIXME I need to close the fds! */
2718 return 0;
2719 } else {
2720 struct dl *d;
2721 for (d = ddf->dlist; d; d=d->next)
2722 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
2723 return __write_init_super_ddf(st);
2724 }
2725 }
2726
2727 #endif
2728
2729 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
2730 unsigned long long data_offset)
2731 {
2732 /* We must reserve the last 32Meg */
2733 if (devsize <= 32*1024*2)
2734 return 0;
2735 return devsize - 32*1024*2;
2736 }
2737
2738 #ifndef MDASSEMBLE
2739
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.
	 * If the cannot be found, fail.
	 * On success, the chosen devices have dl->raiddisk set (0..n-1),
	 * *freesize is set when size was 0, and 1 is returned.
	 * Returns 0 when there are not enough suitable devices.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* Reset per-device selection state from any earlier attempt. */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		/* With no target size, any chunk-sized gap qualifies. */
		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* Walk the gaps between used extents.  Each time a gap of
		 * at least 'minsize' appears, remember it and raise the
		 * bar, so minsize finishes as the largest free gap. */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* No enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		/* Round down to a whole number of chunks. */
		if (chunk) {
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
2826
2827 static int
2828 validate_geometry_ddf_container(struct supertype *st,
2829 int level, int layout, int raiddisks,
2830 int chunk, unsigned long long size,
2831 unsigned long long data_offset,
2832 char *dev, unsigned long long *freesize,
2833 int verbose);
2834
2835 static int validate_geometry_ddf_bvd(struct supertype *st,
2836 int level, int layout, int raiddisks,
2837 int *chunk, unsigned long long size,
2838 unsigned long long data_offset,
2839 char *dev, unsigned long long *freesize,
2840 int verbose);
2841
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	/* Top-level geometry check for DDF: decide whether the requested
	 * array can be created, dispatching to the container or BVD
	 * variant as appropriate.
	 * Returns 1 if the geometry is acceptable, 0 otherwise.
	 */
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	if (chunk && *chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	if (level == -1000000) level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, chunk?*chunk:0,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* Initial sanity check. Exclude illegal levels. */
		int i;
		for (i=0; ddf_level_num[i].num1 != MAXINT; i++)
			if (ddf_level_num[i].num2 == level)
				break;
		if (ddf_level_num[i].num1 == MAXINT) {
			if (verbose)
				pr_err("DDF does not support level %d arrays\n",
				       level);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there. Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	/* O_EXCL succeeding means the device is not in use by anyone. */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {

			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		/* A bare device cannot host a DDF array directly. */
		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	/* EBUSY suggests the device already belongs to something,
	 * possibly a DDF container; any other error is fatal. */
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container. Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		return 0;

	return 1;
}
2975
2976 static int
2977 validate_geometry_ddf_container(struct supertype *st,
2978 int level, int layout, int raiddisks,
2979 int chunk, unsigned long long size,
2980 unsigned long long data_offset,
2981 char *dev, unsigned long long *freesize,
2982 int verbose)
2983 {
2984 int fd;
2985 unsigned long long ldsize;
2986
2987 if (level != LEVEL_CONTAINER)
2988 return 0;
2989 if (!dev)
2990 return 1;
2991
2992 fd = open(dev, O_RDONLY|O_EXCL, 0);
2993 if (fd < 0) {
2994 if (verbose)
2995 pr_err("ddf: Cannot open %s: %s\n",
2996 dev, strerror(errno));
2997 return 0;
2998 }
2999 if (!get_dev_size(fd, dev, &ldsize)) {
3000 close(fd);
3001 return 0;
3002 }
3003 close(fd);
3004
3005 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3006 if (*freesize == 0)
3007 return 0;
3008
3009 return 1;
3010 }
3011
3012 static int validate_geometry_ddf_bvd(struct supertype *st,
3013 int level, int layout, int raiddisks,
3014 int *chunk, unsigned long long size,
3015 unsigned long long data_offset,
3016 char *dev, unsigned long long *freesize,
3017 int verbose)
3018 {
3019 struct stat stb;
3020 struct ddf_super *ddf = st->sb;
3021 struct dl *dl;
3022 unsigned long long pos = 0;
3023 unsigned long long maxsize;
3024 struct extent *e;
3025 int i;
3026 /* ddf/bvd supports lots of things, but not containers */
3027 if (level == LEVEL_CONTAINER) {
3028 if (verbose)
3029 pr_err("DDF cannot create a container within an container\n");
3030 return 0;
3031 }
3032 /* We must have the container info already read in. */
3033 if (!ddf)
3034 return 0;
3035
3036 if (!dev) {
3037 /* General test: make sure there is space for
3038 * 'raiddisks' device extents of size 'size'.
3039 */
3040 unsigned long long minsize = size;
3041 int dcnt = 0;
3042 if (minsize == 0)
3043 minsize = 8;
3044 for (dl = ddf->dlist; dl ; dl = dl->next)
3045 {
3046 int found = 0;
3047 pos = 0;
3048
3049 i = 0;
3050 e = get_extents(ddf, dl);
3051 if (!e) continue;
3052 do {
3053 unsigned long long esize;
3054 esize = e[i].start - pos;
3055 if (esize >= minsize)
3056 found = 1;
3057 pos = e[i].start + e[i].size;
3058 i++;
3059 } while (e[i-1].size);
3060 if (found)
3061 dcnt++;
3062 free(e);
3063 }
3064 if (dcnt < raiddisks) {
3065 if (verbose)
3066 pr_err("ddf: Not enough devices with "
3067 "space for this array (%d < %d)\n",
3068 dcnt, raiddisks);
3069 return 0;
3070 }
3071 return 1;
3072 }
3073 /* This device must be a member of the set */
3074 if (stat(dev, &stb) < 0)
3075 return 0;
3076 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3077 return 0;
3078 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3079 if (dl->major == (int)major(stb.st_rdev) &&
3080 dl->minor == (int)minor(stb.st_rdev))
3081 break;
3082 }
3083 if (!dl) {
3084 if (verbose)
3085 pr_err("ddf: %s is not in the "
3086 "same DDF set\n",
3087 dev);
3088 return 0;
3089 }
3090 e = get_extents(ddf, dl);
3091 maxsize = 0;
3092 i = 0;
3093 if (e) do {
3094 unsigned long long esize;
3095 esize = e[i].start - pos;
3096 if (esize >= maxsize)
3097 maxsize = esize;
3098 pos = e[i].start + e[i].size;
3099 i++;
3100 } while (e[i-1].size);
3101 *freesize = maxsize;
3102 // FIXME here I am
3103
3104 return 1;
3105 }
3106
static int load_super_ddf_all(struct supertype *st, int fd,
			      void **sbp, char *devname)
{
	/* Load a complete DDF superblock from the container open on 'fd'.
	 * The member with the highest sequence number supplies the global
	 * metadata; device-local records are then read from every member.
	 * On success *sbp receives the malloced ddf_super and 0 is
	 * returned; non-zero on failure.
	 */
	struct mdinfo *sra;
	struct ddf_super *super;
	struct mdinfo *sd, *best = NULL;
	int bestseq = 0;
	int seq;
	char nm[20];
	int dfd;

	sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;
	/* Must really be a DDF container, not some other metadata. */
	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "ddf") != 0)
		return 1;

	/* 512-aligned because the metadata is handled with sector I/O.
	 * NOTE(review): 'super' and 'sra' are not freed on the error
	 * paths below — presumably the caller exits shortly after
	 * failure; verify. */
	if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
		return 1;
	memset(super, 0, sizeof(*super));

	/* first, try each device, and choose the best ddf */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDONLY);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		close(dfd);
		if (rv == 0) {
			seq = __be32_to_cpu(super->active->seq);
			/* a header left 'open' may be mid-update; rank it
			 * just below a clean header with the same seq */
			if (super->active->openflag)
				seq--;
			if (!best || seq > bestseq) {
				bestseq = seq;
				best = sd;
			}
		}
	}
	if (!best)
		return 1;
	/* OK, load this ddf */
	sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
	dfd = dev_open(nm, O_RDONLY);
	if (dfd < 0)
		return 1;
	load_ddf_headers(dfd, super, NULL);
	load_ddf_global(dfd, super, NULL);
	close(dfd);
	/* Now we need the device-local bits */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;

		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDWR);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		/* keep_fd == 1: load_ddf_local retains dfd for later
		 * writes, so it is deliberately not closed here. */
		if (rv == 0)
			rv = load_ddf_local(dfd, super, NULL, 1);
		if (rv)
			return 1;
	}

	*sbp = super;
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	strcpy(st->container_devnm, fd2devnm(fd));
	return 0;
}
3183
3184 static int load_container_ddf(struct supertype *st, int fd,
3185 char *devname)
3186 {
3187 return load_super_ddf_all(st, fd, &st->sb, devname);
3188 }
3189
3190 #endif /* MDASSEMBLE */
3191
3192 static int check_secondary(const struct vcl *vc)
3193 {
3194 const struct vd_config *conf = &vc->conf;
3195 int i;
3196
3197 /* The only DDF secondary RAID level md can support is
3198 * RAID 10, if the stripe sizes and Basic volume sizes
3199 * are all equal.
3200 * Other configurations could in theory be supported by exposing
3201 * the BVDs to user space and using device mapper for the secondary
3202 * mapping. So far we don't support that.
3203 */
3204
3205 __u64 sec_elements[4] = {0, 0, 0, 0};
3206 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3207 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3208
3209 if (vc->other_bvds == NULL) {
3210 pr_err("No BVDs for secondary RAID found\n");
3211 return -1;
3212 }
3213 if (conf->prl != DDF_RAID1) {
3214 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3215 return -1;
3216 }
3217 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3218 pr_err("Secondary RAID level %d is unsupported\n",
3219 conf->srl);
3220 return -1;
3221 }
3222 __set_sec_seen(conf->sec_elmnt_seq);
3223 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3224 const struct vd_config *bvd = vc->other_bvds[i];
3225 if (bvd == NULL) {
3226 pr_err("BVD %d is missing\n", i+1);
3227 return -1;
3228 }
3229 if (bvd->srl != conf->srl) {
3230 pr_err("Inconsistent secondary RAID level across BVDs\n");
3231 return -1;
3232 }
3233 if (bvd->prl != conf->prl) {
3234 pr_err("Different RAID levels for BVDs are unsupported\n");
3235 return -1;
3236 }
3237 if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
3238 pr_err("All BVDs must have the same number of primary elements\n");
3239 return -1;
3240 }
3241 if (bvd->chunk_shift != conf->chunk_shift) {
3242 pr_err("Different strip sizes for BVDs are unsupported\n");
3243 return -1;
3244 }
3245 if (bvd->array_blocks != conf->array_blocks) {
3246 pr_err("Different BVD sizes are unsupported\n");
3247 return -1;
3248 }
3249 __set_sec_seen(bvd->sec_elmnt_seq);
3250 }
3251 for (i = 0; i < conf->sec_elmnt_count; i++) {
3252 if (!__was_sec_seen(i)) {
3253 pr_err("BVD %d is missing\n", i);
3254 return -1;
3255 }
3256 }
3257 return 0;
3258 }
3259
3260 #define NO_SUCH_REFNUM (0xFFFFFFFF)
3261 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3262 __u32 refnum, unsigned int nmax,
3263 const struct vd_config **bvd,
3264 unsigned int *idx)
3265 {
3266 unsigned int i, j, n, sec, cnt;
3267
3268 cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
3269 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3270
3271 for (i = 0, j = 0 ; i < nmax ; i++) {
3272 /* j counts valid entries for this BVD */
3273 if (vc->conf.phys_refnum[i] != 0xffffffff)
3274 j++;
3275 if (vc->conf.phys_refnum[i] == refnum) {
3276 *bvd = &vc->conf;
3277 *idx = i;
3278 return sec * cnt + j - 1;
3279 }
3280 }
3281 if (vc->other_bvds == NULL)
3282 goto bad;
3283
3284 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3285 struct vd_config *vd = vc->other_bvds[n-1];
3286 if (vd == NULL)
3287 continue;
3288 sec = vd->sec_elmnt_seq;
3289 for (i = 0, j = 0 ; i < nmax ; i++) {
3290 if (vd->phys_refnum[i] != 0xffffffff)
3291 j++;
3292 if (vd->phys_refnum[i] == refnum) {
3293 *bvd = vd;
3294 *idx = i;
3295 return sec * cnt + j - 1;
3296 }
3297 }
3298 }
3299 bad:
3300 *bvd = NULL;
3301 return NO_SUCH_REFNUM;
3302 }
3303
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 *
	 * If 'subarray' is non-NULL it must be the decimal vcnum of a
	 * single member array; only that array is reported.
	 * Returns a linked list of newly allocated mdinfo structures
	 * (one per array, each with a ->devs list); caller owns them.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next)
	{
		unsigned int i;
		unsigned int j;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip arrays other than the requested subarray, if any. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Secondary RAID levels are only usable if they describe
		 * a RAID10 layout md can express.
		 */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		if (vc->conf.sec_elmnt_count == 1) {
			this->array.level = map_num1(ddf_level_num,
						     vc->conf.prl);
			this->array.raid_disks =
				__be16_to_cpu(vc->conf.prim_elmnt_count);
			this->array.layout =
				rlq_to_layout(vc->conf.rlq, vc->conf.prl,
					      this->array.raid_disks);
		} else {
			/* The only supported layout is RAID 10.
			 * Compatibility has been checked in check_secondary()
			 * above.
			 */
			this->array.level = 10;
			this->array.raid_disks =
				__be16_to_cpu(vc->conf.prim_elmnt_count)
				* vc->conf.sec_elmnt_count;
			/* md RAID10 layout: near-copies count in the low
			 * byte, 0x100 marks "far" offset layout — here
			 * near=prim_elmnt_count.
			 */
			this->array.layout = 0x100 |
				__be16_to_cpu(vc->conf.prim_elmnt_count);
		}
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		/* NOTE(review): this reads a creation timestamp from
		 * bytes 16..19 of the GUID — presumably matching how this
		 * driver generates GUIDs; DECADE converts from the DDF
		 * 1980-based epoch to a Unix time.
		 */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			__be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		/* Array is only "clean" if consistent and fully initialised. */
		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		/* DDF names are 16 bytes, space padded; trim the padding. */
		memcpy(this->name, ddf->virt->entries[i].name, 16);
		this->name[16]=0;
		for(j=0; j<16; j++)
			if (this->name[j] == ' ')
				this->name[j] = 0;

		memset(this->uuid, 0, sizeof(this->uuid));
		this->component_size = __be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() reports on ddf->currentconf,
		 * so point it at this array temporarily.
		 */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Attach a device mdinfo for every online physical disk
		 * that is part of this virtual disk.
		 */
		for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			__u64 *lba_offset;
			int stt;

			/* 0xFFFFFFFF marks an unused PD slot. */
			if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
				continue;

			stt = __be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == NO_SUCH_REFNUM)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (d->disk.refnum ==
				    ddf->phys->entries[pd].refnum)
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = __be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = __be32_to_cpu(ddf->primary.seq);
			/* The per-device LBA offsets live directly after the
			 * phys_refnum array in the vd_config record.
			 */
			lba_offset = (__u64 *)&bvd->phys_refnum[ddf->mppe];
			dev->data_offset = __be64_to_cpu(lba_offset[iphys]);
			dev->component_size = __be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3451
3452 static int store_super_ddf(struct supertype *st, int fd)
3453 {
3454 struct ddf_super *ddf = st->sb;
3455 unsigned long long dsize;
3456 void *buf;
3457 int rc;
3458
3459 if (!ddf)
3460 return 1;
3461
3462 /* ->dlist and ->conflist will be set for updates, currently not
3463 * supported
3464 */
3465 if (ddf->dlist || ddf->conflist)
3466 return 1;
3467
3468 if (!get_dev_size(fd, NULL, &dsize))
3469 return 1;
3470
3471 if (posix_memalign(&buf, 512, 512) != 0)
3472 return 1;
3473 memset(buf, 0, 512);
3474
3475 lseek64(fd, dsize-512, 0);
3476 rc = write(fd, buf, 512);
3477 free(buf);
3478 if (rc < 0)
3479 return 1;
3480 return 0;
3481 }
3482
3483 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3484 {
3485 /*
3486 * return:
3487 * 0 same, or first was empty, and second was copied
3488 * 1 second had wrong number
3489 * 2 wrong uuid
3490 * 3 wrong other info
3491 */
3492 struct ddf_super *first = st->sb;
3493 struct ddf_super *second = tst->sb;
3494 struct dl *dl1, *dl2;
3495 struct vcl *vl1, *vl2;
3496 unsigned int max_vds, max_pds, pd, vd;
3497
3498 if (!first) {
3499 st->sb = tst->sb;
3500 tst->sb = NULL;
3501 return 0;
3502 }
3503
3504 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3505 return 2;
3506
3507 if (first->anchor.seq != second->anchor.seq) {
3508 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3509 __be32_to_cpu(first->anchor.seq),
3510 __be32_to_cpu(second->anchor.seq));
3511 return 3;
3512 }
3513 if (first->max_part != second->max_part ||
3514 first->phys->used_pdes != second->phys->used_pdes ||
3515 first->virt->populated_vdes != second->virt->populated_vdes) {
3516 dprintf("%s: PD/VD number mismatch\n", __func__);
3517 return 3;
3518 }
3519
3520 max_pds = __be16_to_cpu(first->phys->used_pdes);
3521 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3522 for (pd = 0; pd < max_pds; pd++)
3523 if (first->phys->entries[pd].refnum == dl2->disk.refnum)
3524 break;
3525 if (pd == max_pds) {
3526 dprintf("%s: no match for disk %08x\n", __func__,
3527 __be32_to_cpu(dl2->disk.refnum));
3528 return 3;
3529 }
3530 }
3531
3532 max_vds = __be16_to_cpu(first->active->max_vd_entries);
3533 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3534 if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
3535 continue;
3536 for (vd = 0; vd < max_vds; vd++)
3537 if (!memcmp(first->virt->entries[vd].guid,
3538 vl2->conf.guid, DDF_GUID_LEN))
3539 break;
3540 if (vd == max_vds) {
3541 dprintf("%s: no match for VD config\n", __func__);
3542 return 3;
3543 }
3544 }
3545 /* FIXME should I look at anything else? */
3546
3547 /*
3548 At this point we are fairly sure that the meta data matches.
3549 But the new disk may contain additional local data.
3550 Add it to the super block.
3551 */
3552 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3553 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3554 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3555 DDF_GUID_LEN))
3556 break;
3557 if (vl1) {
3558 if (vl1->other_bvds != NULL &&
3559 vl1->conf.sec_elmnt_seq !=
3560 vl2->conf.sec_elmnt_seq) {
3561 dprintf("%s: adding BVD %u\n", __func__,
3562 vl2->conf.sec_elmnt_seq);
3563 add_other_bvd(vl1, &vl2->conf,
3564 first->conf_rec_len*512);
3565 }
3566 continue;
3567 }
3568
3569 if (posix_memalign((void **)&vl1, 512,
3570 (first->conf_rec_len*512 +
3571 offsetof(struct vcl, conf))) != 0) {
3572 pr_err("%s could not allocate vcl buf\n",
3573 __func__);
3574 return 3;
3575 }
3576
3577 vl1->next = first->conflist;
3578 vl1->block_sizes = NULL;
3579 if (vl2->conf.sec_elmnt_count > 1) {
3580 vl1->other_bvds = xcalloc(vl2->conf.sec_elmnt_count - 1,
3581 sizeof(struct vd_config *));
3582 } else
3583 vl1->other_bvds = NULL;
3584 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3585 vl1->lba_offset = (__u64 *)
3586 &vl1->conf.phys_refnum[first->mppe];
3587 for (vd = 0; vd < max_vds; vd++)
3588 if (!memcmp(first->virt->entries[vd].guid,
3589 vl1->conf.guid, DDF_GUID_LEN))
3590 break;
3591 vl1->vcnum = vd;
3592 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3593 first->conflist = vl1;
3594 }
3595
3596 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3597 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3598 if (dl1->disk.refnum == dl2->disk.refnum)
3599 break;
3600 if (dl1)
3601 continue;
3602
3603 if (posix_memalign((void **)&dl1, 512,
3604 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3605 != 0) {
3606 pr_err("%s could not allocate disk info buffer\n",
3607 __func__);
3608 return 3;
3609 }
3610 memcpy(dl1, dl2, sizeof(*dl1));
3611 dl1->mdupdate = NULL;
3612 dl1->next = first->dlist;
3613 dl1->fd = -1;
3614 for (pd = 0; pd < max_pds; pd++)
3615 if (first->phys->entries[pd].refnum == dl1->disk.refnum)
3616 break;
3617 dl1->pdnum = pd;
3618 if (dl2->spare) {
3619 if (posix_memalign((void **)&dl1->spare, 512,
3620 first->conf_rec_len*512) != 0) {
3621 pr_err("%s could not allocate spare info buf\n",
3622 __func__);
3623 return 3;
3624 }
3625 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3626 }
3627 for (vd = 0 ; vd < first->max_part ; vd++) {
3628 if (!dl2->vlist[vd]) {
3629 dl1->vlist[vd] = NULL;
3630 continue;
3631 }
3632 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3633 if (!memcmp(vl1->conf.guid,
3634 dl2->vlist[vd]->conf.guid,
3635 DDF_GUID_LEN))
3636 break;
3637 dl1->vlist[vd] = vl1;
3638 }
3639 }
3640 first->dlist = dl1;
3641 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3642 dl1->disk.refnum);
3643 }
3644
3645 return 0;
3646 }
3647
3648 #ifndef MDASSEMBLE
3649 /*
3650 * A new array 'a' has been started which claims to be instance 'inst'
3651 * within container 'c'.
3652 * We need to confirm that the array matches the metadata in 'c' so
3653 * that we don't corrupt any metadata.
3654 */
3655 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3656 {
3657 dprintf("ddf: open_new %s\n", inst);
3658 a->info.container_member = atoi(inst);
3659 return 0;
3660 }
3661
3662 /*
3663 * The array 'a' is to be marked clean in the metadata.
3664 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
3665 * clean up to the point (in sectors). If that cannot be recorded in the
3666 * metadata, then leave it as dirty.
3667 *
3668 * For DDF, we need to clear the DDF_state_inconsistent bit in the
3669 * !global! virtual_disk.virtual_entry structure.
3670 */
3671 static int ddf_set_array_state(struct active_array *a, int consistent)
3672 {
3673 struct ddf_super *ddf = a->container->sb;
3674 int inst = a->info.container_member;
3675 int old = ddf->virt->entries[inst].state;
3676 if (consistent == 2) {
3677 /* Should check if a recovery should be started FIXME */
3678 consistent = 1;
3679 if (!is_resync_complete(&a->info))
3680 consistent = 0;
3681 }
3682 if (consistent)
3683 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
3684 else
3685 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
3686 if (old != ddf->virt->entries[inst].state)
3687 ddf_set_updates_pending(ddf);
3688
3689 old = ddf->virt->entries[inst].init_state;
3690 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
3691 if (is_resync_complete(&a->info))
3692 ddf->virt->entries[inst].init_state |= DDF_init_full;
3693 else if (a->info.resync_start == 0)
3694 ddf->virt->entries[inst].init_state |= DDF_init_not;
3695 else
3696 ddf->virt->entries[inst].init_state |= DDF_init_quick;
3697 if (old != ddf->virt->entries[inst].init_state)
3698 ddf_set_updates_pending(ddf);
3699
3700 dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
3701 a->info.resync_start);
3702 return consistent;
3703 }
3704
/* Standard container_of: given 'ptr' to field 'member' inside a
 * 'type', recover a pointer to the enclosing structure.
 */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
3708 /*
3709 * The state of each disk is stored in the global phys_disk structure
3710 * in phys_disk.entries[n].state.
3711 * This makes various combinations awkward.
3712 * - When a device fails in any array, it must be failed in all arrays
3713 * that include a part of this device.
3714 * - When a component is rebuilding, we cannot include it officially in the
3715 * array unless this is the only array that uses the device.
3716 *
3717 * So: when transitioning:
3718 * Online -> failed, just set failed flag. monitor will propagate
3719 * spare -> online, the device might need to be added to the array.
3720 * spare -> failed, just set failed. Don't worry if in array or not.
3721 */
3722 static void ddf_set_disk(struct active_array *a, int n, int state)
3723 {
3724 struct ddf_super *ddf = a->container->sb;
3725 unsigned int inst = a->info.container_member;
3726 struct vd_config *vc = find_vdcr(ddf, inst);
3727 int pd = find_phys(ddf, vc->phys_refnum[n]);
3728 int i, st, working;
3729 struct mdinfo *mdi;
3730 struct dl *dl;
3731
3732 if (vc == NULL) {
3733 dprintf("ddf: cannot find instance %d!!\n", inst);
3734 return;
3735 }
3736 /* Find the matching slot in 'info'. */
3737 for (mdi = a->info.devs; mdi; mdi = mdi->next)
3738 if (mdi->disk.raid_disk == n)
3739 break;
3740 if (!mdi)
3741 return;
3742
3743 /* and find the 'dl' entry corresponding to that. */
3744 for (dl = ddf->dlist; dl; dl = dl->next)
3745 if (mdi->state_fd >= 0 &&
3746 mdi->disk.major == dl->major &&
3747 mdi->disk.minor == dl->minor)
3748 break;
3749 if (!dl)
3750 return;
3751
3752 if (pd < 0 || pd != dl->pdnum) {
3753 /* disk doesn't currently exist or has changed.
3754 * If it is now in_sync, insert it. */
3755 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
3756 struct vcl *vcl;
3757 pd = dl->pdnum;
3758 vc->phys_refnum[n] = dl->disk.refnum;
3759 vcl = container_of(vc, struct vcl, conf);
3760 vcl->lba_offset[n] = mdi->data_offset;
3761 ddf->phys->entries[pd].type &=
3762 ~__cpu_to_be16(DDF_Global_Spare);
3763 ddf->phys->entries[pd].type |=
3764 __cpu_to_be16(DDF_Active_in_VD);
3765 ddf_set_updates_pending(ddf);
3766 }
3767 } else {
3768 int old = ddf->phys->entries[pd].state;
3769 if (state & DS_FAULTY)
3770 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
3771 if (state & DS_INSYNC) {
3772 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
3773 ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
3774 }
3775 if (old != ddf->phys->entries[pd].state)
3776 ddf_set_updates_pending(ddf);
3777 }
3778
3779 dprintf("ddf: set_disk %d to %x\n", n, state);
3780
3781 /* Now we need to check the state of the array and update
3782 * virtual_disk.entries[n].state.
3783 * It needs to be one of "optimal", "degraded", "failed".
3784 * I don't understand 'deleted' or 'missing'.
3785 */
3786 working = 0;
3787 for (i=0; i < a->info.array.raid_disks; i++) {
3788 pd = find_phys(ddf, vc->phys_refnum[i]);
3789 if (pd < 0)
3790 continue;
3791 st = __be16_to_cpu(ddf->phys->entries[pd].state);
3792 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3793 == DDF_Online)
3794 working++;
3795 }
3796 state = DDF_state_degraded;
3797 if (working == a->info.array.raid_disks)
3798 state = DDF_state_optimal;
3799 else switch(vc->prl) {
3800 case DDF_RAID0:
3801 case DDF_CONCAT:
3802 case DDF_JBOD:
3803 state = DDF_state_failed;
3804 break;
3805 case DDF_RAID1:
3806 if (working == 0)
3807 state = DDF_state_failed;
3808 else if (working == 2 && state == DDF_state_degraded)
3809 state = DDF_state_part_optimal;
3810 break;
3811 case DDF_RAID4:
3812 case DDF_RAID5:
3813 if (working < a->info.array.raid_disks-1)
3814 state = DDF_state_failed;
3815 break;
3816 case DDF_RAID6:
3817 if (working < a->info.array.raid_disks-2)
3818 state = DDF_state_failed;
3819 else if (working == a->info.array.raid_disks-1)
3820 state = DDF_state_part_optimal;
3821 break;
3822 }
3823
3824 if (ddf->virt->entries[inst].state !=
3825 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
3826 | state)) {
3827
3828 ddf->virt->entries[inst].state =
3829 (ddf->virt->entries[inst].state & ~DDF_state_mask)
3830 | state;
3831 ddf_set_updates_pending(ddf);
3832 }
3833
3834 }
3835
3836 static void ddf_sync_metadata(struct supertype *st)
3837 {
3838
3839 /*
3840 * Write all data to all devices.
3841 * Later, we might be able to track whether only local changes
3842 * have been made, or whether any global data has been changed,
3843 * but ddf is sufficiently weird that it probably always
3844 * changes global data ....
3845 */
3846 struct ddf_super *ddf = st->sb;
3847 if (!ddf->updates_pending)
3848 return;
3849 ddf->updates_pending = 0;
3850 __write_init_super_ddf(st);
3851 dprintf("ddf: sync_metadata\n");
3852 }
3853
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible update are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 *
	 * Runs in the monitor thread: it must not allocate memory; any
	 * buffers needed are pre-allocated in ddf_prepare_update() and
	 * passed in via update->space.
	 */
	struct ddf_super *ddf = st->sb;
	__u32 *magic = (__u32*)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int mppe;
	unsigned int ent;
	unsigned int pdnum, pd2;

	dprintf("Process update %x\n", *magic);

	switch (*magic) {
	case DDF_PHYS_RECORDS_MAGIC:

		/* Payload is a phys_disk header plus exactly one entry. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		/* used_pdes carries the target slot number for this update. */
		ent = __be16_to_cpu(pd->used_pdes);
		if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (pd->entries[0].state & __cpu_to_be16(DDF_Missing)) {
			struct dl **dlp;
			/* removing this disk. */
			ddf->phys->entries[ent].state |= __cpu_to_be16(DDF_Missing);
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					/* Hand the dl back via ->space so
					 * managemon can free it safely.
					 */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* An all-0xff GUID marks a free slot; refuse to clobber
		 * an occupied one.
		 */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = __cpu_to_be16(1 +
					   __be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
		break;

	case DDF_VIRT_RECORDS_MAGIC:

		/* Payload is a virtual_disk header plus exactly one entry. */
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		/* populated_vdes carries the target slot number. */
		ent = __be16_to_cpu(vd->populated_vdes);
		if (ent >= __be16_to_cpu(ddf->virt->max_vdes))
			return;
		if (!all_ff(ddf->virt->entries[ent].guid))
			return;
		ddf->virt->entries[ent] = vd->entries[0];
		ddf->virt->populated_vdes = __cpu_to_be16(1 +
			      __be16_to_cpu(ddf->virt->populated_vdes));
		ddf_set_updates_pending(ddf);
		break;

	case DDF_VD_CONF_MAGIC:
		dprintf("len %d %d\n", update->len, ddf->conf_rec_len);

		mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
		if ((unsigned)update->len != ddf->conf_rec_len * 512)
			return;
		vc = (struct vd_config*)update->buf;
		/* Match an existing config record by GUID, if any. */
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("vcl = %p\n", vcl);
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			memcpy(vcl->conf.phys_refnum, vc->phys_refnum,
			       mppe * (sizeof(__u32) + sizeof(__u64)));
		} else {
			/* A new VD_CONF */
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, update->len);
			vcl->lba_offset = (__u64*)
				&vcl->conf.phys_refnum[mppe];
			for (ent = 0;
			     ent < __be16_to_cpu(ddf->virt->populated_vdes);
			     ent++)
				if (memcmp(vc->guid, ddf->virt->entries[ent].guid,
					   DDF_GUID_LEN) == 0) {
					vcl->vcnum = ent;
					break;
				}
			ddf->conflist = vcl;
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
			if (ddf->phys->entries[pdnum].state
			    & __be16_to_cpu(DDF_Failed))
				ddf->phys->entries[pdnum].state
					|= __be16_to_cpu(DDF_Transition);
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int dn;
			unsigned int vn = 0;
			int in_degraded = 0;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
				for (dn=0; dn < ddf->mppe ; dn++)
					if (vcl->conf.phys_refnum[dn] ==
					    dl->disk.refnum) {
						int vstate;
						dprintf("dev %d has %p at %d\n",
							dl->pdnum, vcl, vn);
						/* Clear the Transition flag */
						if (ddf->phys->entries[dl->pdnum].state
						    & __be16_to_cpu(DDF_Failed))
							ddf->phys->entries[dl->pdnum].state &=
								~__be16_to_cpu(DDF_Transition);

						dl->vlist[vn++] = vcl;
						vstate = ddf->virt->entries[vcl->vcnum].state
							& DDF_state_mask;
						if (vstate == DDF_state_degraded ||
						    vstate == DDF_state_part_optimal)
							in_degraded = 1;
						break;
					}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			/* Reclassify the device: active-in-VD, dedicated
			 * spare, or global spare, based on what we found.
			 */
			if (dl->vlist[0]) {
				ddf->phys->entries[dl->pdnum].type &=
					~__cpu_to_be16(DDF_Global_Spare);
				if (!(ddf->phys->entries[dl->pdnum].type &
				      __cpu_to_be16(DDF_Active_in_VD))) {
					ddf->phys->entries[dl->pdnum].type |=
						__cpu_to_be16(DDF_Active_in_VD);
					if (in_degraded)
						ddf->phys->entries[dl->pdnum].state |=
							__cpu_to_be16(DDF_Rebuilding);
				}
			}
			if (dl->spare) {
				ddf->phys->entries[dl->pdnum].type &=
					~__cpu_to_be16(DDF_Global_Spare);
				ddf->phys->entries[dl->pdnum].type |=
					__cpu_to_be16(DDF_Spare);
			}
			if (!dl->vlist[0] && !dl->spare) {
				ddf->phys->entries[dl->pdnum].type |=
					__cpu_to_be16(DDF_Global_Spare);
				ddf->phys->entries[dl->pdnum].type &=
					~__cpu_to_be16(DDF_Spare |
						       DDF_Active_in_VD);
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
			if ((ddf->phys->entries[pdnum].state
			     & __be16_to_cpu(DDF_Failed))
			    && (ddf->phys->entries[pdnum].state
				& __be16_to_cpu(DDF_Transition)))
				/* skip this one */;
			else if (pdnum == pd2)
				pd2++;
			else {
				/* Compact surviving entries down over the
				 * removed ones, keeping dl->pdnum in sync.
				 */
				ddf->phys->entries[pd2] = ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		ddf->phys->used_pdes = __cpu_to_be16(pd2);
		/* Mark the now-unused tail slots as free (all-0xff GUID). */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff, DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
		break;
	case DDF_SPARE_ASSIGN_MAGIC:
	default: break;
	}
}
4099
4100 static void ddf_prepare_update(struct supertype *st,
4101 struct metadata_update *update)
4102 {
4103 /* This update arrived at managemon.
4104 * We are about to pass it to monitor.
4105 * If a malloc is needed, do it here.
4106 */
4107 struct ddf_super *ddf = st->sb;
4108 __u32 *magic = (__u32*)update->buf;
4109 if (*magic == DDF_VD_CONF_MAGIC)
4110 if (posix_memalign(&update->space, 512,
4111 offsetof(struct vcl, conf)
4112 + ddf->conf_rec_len * 512) != 0)
4113 update->space = NULL;
4114 }
4115
4116 /*
4117 * Check if the array 'a' is degraded but not failed.
4118 * If it is, find as many spares as are available and needed and
4119 * arrange for their inclusion.
4120 * We only choose devices which are not already in the array,
4121 * and prefer those with a spare-assignment to this array.
4122 * otherwise we choose global spares - assuming always that
4123 * there is enough room.
4124 * For each spare that we assign, we return an 'mdinfo' which
4125 * describes the position for the device in the array.
4126 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4127 * the new phys_refnum and lba_offset values.
4128 *
4129 * Only worry about BVDs at the moment.
4130 */
4131 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4132 struct metadata_update **updates)
4133 {
4134 int working = 0;
4135 struct mdinfo *d;
4136 struct ddf_super *ddf = a->container->sb;
4137 int global_ok = 0;
4138 struct mdinfo *rv = NULL;
4139 struct mdinfo *di;
4140 struct metadata_update *mu;
4141 struct dl *dl;
4142 int i;
4143 struct vd_config *vc;
4144 __u64 *lba;
4145
4146 for (d = a->info.devs ; d ; d = d->next) {
4147 if ((d->curr_state & DS_FAULTY) &&
4148 d->state_fd >= 0)
4149 /* wait for Removal to happen */
4150 return NULL;
4151 if (d->state_fd >= 0)
4152 working ++;
4153 }
4154
4155 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
4156 a->info.array.level);
4157 if (working == a->info.array.raid_disks)
4158 return NULL; /* array not degraded */
4159 switch (a->info.array.level) {
4160 case 1:
4161 if (working == 0)
4162 return NULL; /* failed */
4163 break;
4164 case 4:
4165 case 5:
4166 if (working < a->info.array.raid_disks - 1)
4167 return NULL; /* failed */
4168 break;
4169 case 6:
4170 if (working < a->info.array.raid_disks - 2)
4171 return NULL; /* failed */
4172 break;
4173 default: /* concat or stripe */
4174 return NULL; /* failed */
4175 }
4176
4177 /* For each slot, if it is not working, find a spare */
4178 dl = ddf->dlist;
4179 for (i = 0; i < a->info.array.raid_disks; i++) {
4180 for (d = a->info.devs ; d ; d = d->next)
4181 if (d->disk.raid_disk == i)
4182 break;
4183 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4184 if (d && (d->state_fd >= 0))
4185 continue;
4186
4187 /* OK, this device needs recovery. Find a spare */
4188 again:
4189 for ( ; dl ; dl = dl->next) {
4190 unsigned long long esize;
4191 unsigned long long pos;
4192 struct mdinfo *d2;
4193 int is_global = 0;
4194 int is_dedicated = 0;
4195 struct extent *ex;
4196 unsigned int j;
4197 /* If in this array, skip */
4198 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4199 if (d2->state_fd >= 0 &&
4200 d2->disk.major == dl->major &&
4201 d2->disk.minor == dl->minor) {
4202 dprintf("%x:%x already in array\n", dl->major, dl->minor);
4203 break;
4204 }
4205 if (d2)
4206 continue;
4207 if (ddf->phys->entries[dl->pdnum].type &
4208 __cpu_to_be16(DDF_Spare)) {
4209 /* Check spare assign record */
4210 if (dl->spare) {
4211 if (dl->spare->type & DDF_spare_dedicated) {
4212 /* check spare_ents for guid */
4213 for (j = 0 ;
4214 j < __be16_to_cpu(dl->spare->populated);
4215 j++) {
4216 if (memcmp(dl->spare->spare_ents[j].guid,
4217 ddf->virt->entries[a->info.container_member].guid,
4218 DDF_GUID_LEN) == 0)
4219 is_dedicated = 1;
4220 }
4221 } else
4222 is_global = 1;
4223 }
4224 } else if (ddf->phys->entries[dl->pdnum].type &
4225 __cpu_to_be16(DDF_Global_Spare)) {
4226 is_global = 1;
4227 } else if (!(ddf->phys->entries[dl->pdnum].state &
4228 __cpu_to_be16(DDF_Failed))) {
4229 /* we can possibly use some of this */
4230 is_global = 1;
4231 }
4232 if ( ! (is_dedicated ||
4233 (is_global && global_ok))) {
4234 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4235 is_dedicated, is_global);
4236 continue;
4237 }
4238
4239 /* We are allowed to use this device - is there space?
4240 * We need a->info.component_size sectors */
4241 ex = get_extents(ddf, dl);
4242 if (!ex) {
4243 dprintf("cannot get extents\n");
4244 continue;
4245 }
4246 j = 0; pos = 0;
4247 esize = 0;
4248
4249 do {
4250 esize = ex[j].start - pos;
4251 if (esize >= a->info.component_size)
4252 break;
4253 pos = ex[j].start + ex[j].size;
4254 j++;
4255 } while (ex[j-1].size);
4256
4257 free(ex);
4258 if (esize < a->info.component_size) {
4259 dprintf("%x:%x has no room: %llu %llu\n",
4260 dl->major, dl->minor,
4261 esize, a->info.component_size);
4262 /* No room */
4263 continue;
4264 }
4265
4266 /* Cool, we have a device with some space at pos */
4267 di = xcalloc(1, sizeof(*di));
4268 di->disk.number = i;
4269 di->disk.raid_disk = i;
4270 di->disk.major = dl->major;
4271 di->disk.minor = dl->minor;
4272 di->disk.state = 0;
4273 di->recovery_start = 0;
4274 di->data_offset = pos;
4275 di->component_size = a->info.component_size;
4276 di->container_member = dl->pdnum;
4277 di->next = rv;
4278 rv = di;
4279 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4280 i, pos);
4281
4282 break;
4283 }
4284 if (!dl && ! global_ok) {
4285 /* not enough dedicated spares, try global */
4286 global_ok = 1;
4287 dl = ddf->dlist;
4288 goto again;
4289 }
4290 }
4291
4292 if (!rv)
4293 /* No spares found */
4294 return rv;
4295 /* Now 'rv' has a list of devices to return.
4296 * Create a metadata_update record to update the
4297 * phys_refnum and lba_offset values
4298 */
4299 mu = xmalloc(sizeof(*mu));
4300 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
4301 free(mu);
4302 mu = NULL;
4303 }
4304 mu->buf = xmalloc(ddf->conf_rec_len * 512);
4305 mu->len = ddf->conf_rec_len * 512;
4306 mu->space = NULL;
4307 mu->space_list = NULL;
4308 mu->next = *updates;
4309 vc = find_vdcr(ddf, a->info.container_member);
4310 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
4311
4312 vc = (struct vd_config*)mu->buf;
4313 lba = (__u64*)&vc->phys_refnum[ddf->mppe];
4314 for (di = rv ; di ; di = di->next) {
4315 vc->phys_refnum[di->disk.raid_disk] =
4316 ddf->phys->entries[dl->pdnum].refnum;
4317 lba[di->disk.raid_disk] = di->data_offset;
4318 }
4319 *updates = mu;
4320 return rv;
4321 }
4322 #endif /* MDASSEMBLE */
4323
4324 static int ddf_level_to_layout(int level)
4325 {
4326 switch(level) {
4327 case 0:
4328 case 1:
4329 return 0;
4330 case 5:
4331 return ALGORITHM_LEFT_SYMMETRIC;
4332 case 6:
4333 return ALGORITHM_ROTATING_N_CONTINUE;
4334 case 10:
4335 return 0x102;
4336 default:
4337 return UnSet;
4338 }
4339 }
4340
4341 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
4342 {
4343 if (level && *level == UnSet)
4344 *level = LEVEL_CONTAINER;
4345
4346 if (level && layout && *layout == UnSet)
4347 *layout = ddf_level_to_layout(*level);
4348 }
4349
/* Method table wiring the generic mdadm superswitch interface to the
 * DDF (SNIA Common RAID Disk Data Format) implementations above.
 * Entries guarded by MDASSEMBLE are omitted from the stripped-down
 * mdassemble build.
 */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* Examination / reporting of on-disk metadata. */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	/* Creation / modification of containers and member devices. */
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
#endif
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	/* Loading, storing and matching of superblocks. */
	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* Non-zero: metadata is managed externally (by mdmon), not by
	 * the kernel. */
	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new	= ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk	= ddf_set_disk,
	.sync_metadata	= ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};