1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * seconds-in-decade-of-seventies to convert to Linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
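/* DECADE is 315532800 seconds (1970-01-01 to 1980-01-01, two leap years),
 * so a DDF timestamp t corresponds to the Unix time t + DECADE; this is
 * how getinfo_super_ddf() below derives array.ctime from GUID bytes 16-19.
 */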
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
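/* Typical read path (see load_ddf_headers() below): the anchor is read
 * from the last sector of the device; anchor->primary_lba and
 * anchor->secondary_lba locate the primary and secondary headers; and the
 * section offset/length fields in the chosen header locate the
 * controller, physical disk, virtual disk, config and disk-data sections
 * (load_ddf_global() and load_ddf_local()).
 */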
64
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
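/* These single-member structs keep on-disk (big-endian) values in types
 * distinct from CPU-order integers, so accidental mixing fails to compile;
 * comparisons therefore use the helpers above, e.g.
 *     be32_eq(hdr->magic, DDF_HEADER_MAGIC)
 * in load_ddf_header() below, instead of a raw '==' on __u32 values.
 */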
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section - global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
310 #define DDF_init_quick 0x01 /* initialisation in progress.
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records, each config_record_len sectors long.
321 * They can be vd_config or spare_assign
322 */
323
324 struct vd_config {
325 be32 magic; /* DDF_VD_CONF_MAGIC */
326 be32 crc;
327 char guid[DDF_GUID_LEN];
328 be32 timestamp;
329 be32 seqnum;
330 __u8 pad0[24];
331 be16 prim_elmnt_count;
332 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
333 __u8 prl;
334 __u8 rlq;
335 __u8 sec_elmnt_count;
336 __u8 sec_elmnt_seq;
337 __u8 srl;
338 be64 blocks; /* blocks per component could be different
339 * on different component devices...(only
340 * for concat I hope) */
341 be64 array_blocks; /* blocks in array */
342 __u8 pad1[8];
343 be32 spare_refs[8];
344 __u8 cache_pol[8];
345 __u8 bg_rate;
346 __u8 pad2[3];
347 __u8 pad3[52];
348 __u8 pad4[192];
349 __u8 v0[32]; /* reserved- 0xff */
350 __u8 v1[32]; /* reserved- 0xff */
351 __u8 v2[16]; /* reserved- 0xff */
352 __u8 v3[16]; /* reserved- 0xff */
353 __u8 vendor[32];
354 be32 phys_refnum[0]; /* refnum of each disk in sequence */
355 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
356 bvd are always the same size */
357 };
358 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
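/* The phys_refnum[] array has mppe entries; it is immediately followed by
 * mppe 64-bit LBA offsets (one per physical disk of the BVD), which the
 * LBA_OFFSET() macro exposes.  For example, the data start of member 'cd'
 * is read as be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]) in
 * getinfo_super_ddf_bvd() below.
 */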
359
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* use write cache even if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
369 struct spare_assign {
370 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
371 be32 crc;
372 be32 timestamp;
373 __u8 reserved[7];
374 __u8 type;
375 be16 populated; /* SAEs used */
376 be16 max; /* max SAEs */
377 __u8 pad[8];
378 struct spare_assign_entry {
379 char guid[DDF_GUID_LEN];
380 be16 secondary_element;
381 __u8 pad[6];
382 } spare_ents[0];
383 };
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
391 struct disk_data {
392 be32 magic; /* DDF_PHYS_DATA_MAGIC */
393 be32 crc;
394 char guid[DDF_GUID_LEN];
395 be32 refnum; /* crc of some magic drive data ... */
396 __u8 forced_ref; /* set when above was not result of magic */
397 __u8 forced_guid; /* set if guid was forced rather than magic */
398 __u8 vendor[32];
399 __u8 pad[442];
400 };
401
402 /* bbm_section content */
403 struct bad_block_log {
404 be32 magic;
405 be32 crc;
406 be16 entry_count;
407 be32 spare_count;
408 __u8 pad[10];
409 be64 first_spare;
410 struct mapped_block {
411 be64 defective_start;
412 be32 replacement_start;
413 be16 remap_count;
414 __u8 pad[2];
415 } entries[0];
416 };
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
435 * Note that the ddf_super has space for the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
440 struct ddf_super {
441 struct ddf_header anchor, primary, secondary;
442 struct ddf_controller_data controller;
443 struct ddf_header *active;
444 struct phys_disk *phys;
445 struct virtual_disk *virt;
446 char *conf;
447 int pdsize, vdsize;
448 unsigned int max_part, mppe, conf_rec_len;
449 int currentdev;
450 int updates_pending;
451 struct vcl {
452 union {
453 char space[512];
454 struct {
455 struct vcl *next;
456 unsigned int vcnum; /* index into ->virt */
457 struct vd_config **other_bvds;
458 __u64 *block_sizes; /* NULL if all the same */
459 };
460 };
461 struct vd_config conf;
462 } *conflist, *currentconf;
463 struct dl {
464 union {
465 char space[512];
466 struct {
467 struct dl *next;
468 int major, minor;
469 char *devname;
470 int fd;
471 unsigned long long size; /* sectors */
472 be64 primary_lba; /* sectors */
473 be64 secondary_lba; /* sectors */
474 be64 workspace_lba; /* sectors */
475 int pdnum; /* index in ->phys */
476 struct spare_assign *spare;
477 void *mdupdate; /* hold metadata update */
478
479 /* These fields used by auto-layout */
480 int raiddisk; /* slot to fill in autolayout */
481 __u64 esize;
482 };
483 };
484 struct disk_data disk;
485 struct vcl *vlist[0]; /* max_part in size */
486 } *dlist, *add_list;
487 };
488
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
493 #if DEBUG
494 static int all_ff(const char *guid);
495 static void pr_state(struct ddf_super *ddf, const char *msg)
496 {
497 unsigned int i;
498 dprintf("%s/%s: ", __func__, msg);
499 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
500 if (all_ff(ddf->virt->entries[i].guid))
501 continue;
502 dprintf("%u(s=%02x i=%02x) ", i,
503 ddf->virt->entries[i].state,
504 ddf->virt->entries[i].init_state);
505 }
506 dprintf("\n");
507 }
508 #else
509 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
510 #endif
511
512 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
513 {
514 ddf->updates_pending = 1;
515 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
516 pr_state(ddf, func);
517 }
518
519 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
520
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525
526 static be32 calc_crc(void *buf, int len)
527 {
528 /* crcs are always at the same place as in the ddf_header */
529 struct ddf_header *ddf = buf;
530 be32 oldcrc = ddf->crc;
531 __u32 newcrc;
532 ddf->crc = cpu_to_be32(0xffffffff);
533
534 newcrc = crc32(0, buf, len);
535 ddf->crc = oldcrc;
536 /* The crc is stored (like everything) bigendian, so convert
537 * here for simplicity
538 */
539 return cpu_to_be32(newcrc);
540 }
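/* calc_crc() is used to verify each section as it is loaded: data are
 * accepted only when the freshly computed CRC matches the stored one,
 * e.g. be32_eq(calc_crc(hdr, 512), hdr->crc) in load_ddf_header() below.
 */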
541
542 #define DDF_INVALID_LEVEL 0xff
543 #define DDF_NO_SECONDARY 0xff
544 static int err_bad_md_layout(const mdu_array_info_t *array)
545 {
546 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
547 array->level, array->layout, array->raid_disks);
548 return -1;
549 }
550
551 static int layout_md2ddf(const mdu_array_info_t *array,
552 struct vd_config *conf)
553 {
554 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
555 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
556 __u8 sec_elmnt_count = 1;
557 __u8 srl = DDF_NO_SECONDARY;
558
559 switch (array->level) {
560 case LEVEL_LINEAR:
561 prl = DDF_CONCAT;
562 break;
563 case 0:
564 rlq = DDF_RAID0_SIMPLE;
565 prl = DDF_RAID0;
566 break;
567 case 1:
568 switch (array->raid_disks) {
569 case 2:
570 rlq = DDF_RAID1_SIMPLE;
571 break;
572 case 3:
573 rlq = DDF_RAID1_MULTI;
574 break;
575 default:
576 return err_bad_md_layout(array);
577 }
578 prl = DDF_RAID1;
579 break;
580 case 4:
581 if (array->layout != 0)
582 return err_bad_md_layout(array);
583 rlq = DDF_RAID4_N;
584 prl = DDF_RAID4;
585 break;
586 case 5:
587 switch (array->layout) {
588 case ALGORITHM_LEFT_ASYMMETRIC:
589 rlq = DDF_RAID5_N_RESTART;
590 break;
591 case ALGORITHM_RIGHT_ASYMMETRIC:
592 rlq = DDF_RAID5_0_RESTART;
593 break;
594 case ALGORITHM_LEFT_SYMMETRIC:
595 rlq = DDF_RAID5_N_CONTINUE;
596 break;
597 case ALGORITHM_RIGHT_SYMMETRIC:
598 /* not mentioned in standard */
599 default:
600 return err_bad_md_layout(array);
601 }
602 prl = DDF_RAID5;
603 break;
604 case 6:
605 switch (array->layout) {
606 case ALGORITHM_ROTATING_N_RESTART:
607 rlq = DDF_RAID5_N_RESTART;
608 break;
609 case ALGORITHM_ROTATING_ZERO_RESTART:
610 rlq = DDF_RAID6_0_RESTART;
611 break;
612 case ALGORITHM_ROTATING_N_CONTINUE:
613 rlq = DDF_RAID5_N_CONTINUE;
614 break;
615 default:
616 return err_bad_md_layout(array);
617 }
618 prl = DDF_RAID6;
619 break;
620 case 10:
621 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
622 rlq = DDF_RAID1_SIMPLE;
623 prim_elmnt_count = cpu_to_be16(2);
624 sec_elmnt_count = array->raid_disks / 2;
625 } else if (array->raid_disks % 3 == 0
626 && array->layout == 0x103) {
627 rlq = DDF_RAID1_MULTI;
628 prim_elmnt_count = cpu_to_be16(3);
629 sec_elmnt_count = array->raid_disks / 3;
630 } else
631 return err_bad_md_layout(array);
632 srl = DDF_2SPANNED;
633 prl = DDF_RAID1;
634 break;
635 default:
636 return err_bad_md_layout(array);
637 }
638 conf->prl = prl;
639 conf->prim_elmnt_count = prim_elmnt_count;
640 conf->rlq = rlq;
641 conf->srl = srl;
642 conf->sec_elmnt_count = sec_elmnt_count;
643 return 0;
644 }
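/* Example of the mapping above: an md RAID5 array using
 * ALGORITHM_LEFT_SYMMETRIC becomes prl=DDF_RAID5, rlq=DDF_RAID5_N_CONTINUE
 * with srl=DDF_NO_SECONDARY; an md RAID10 array with layout 0x102 (two
 * near copies) becomes per-pair DDF_RAID1 plexes spanned at the secondary
 * level (srl=DDF_2SPANNED, prim_elmnt_count=2).
 */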
645
646 static int err_bad_ddf_layout(const struct vd_config *conf)
647 {
648 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
649 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
650 return -1;
651 }
652
653 static int layout_ddf2md(const struct vd_config *conf,
654 mdu_array_info_t *array)
655 {
656 int level = LEVEL_UNSUPPORTED;
657 int layout = 0;
658 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
659
660 if (conf->sec_elmnt_count > 1) {
661 /* see also check_secondary() */
662 if (conf->prl != DDF_RAID1 ||
663 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
664 pr_err("Unsupported secondary RAID level %u/%u\n",
665 conf->prl, conf->srl);
666 return -1;
667 }
668 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
669 layout = 0x102;
670 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
671 layout = 0x103;
672 else
673 return err_bad_ddf_layout(conf);
674 raiddisks *= conf->sec_elmnt_count;
675 level = 10;
676 goto good;
677 }
678
679 switch (conf->prl) {
680 case DDF_CONCAT:
681 level = LEVEL_LINEAR;
682 break;
683 case DDF_RAID0:
684 if (conf->rlq != DDF_RAID0_SIMPLE)
685 return err_bad_ddf_layout(conf);
686 level = 0;
687 break;
688 case DDF_RAID1:
689 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
690 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
691 return err_bad_ddf_layout(conf);
692 level = 1;
693 break;
694 case DDF_RAID4:
695 if (conf->rlq != DDF_RAID4_N)
696 return err_bad_ddf_layout(conf);
697 level = 4;
698 break;
699 case DDF_RAID5:
700 switch (conf->rlq) {
701 case DDF_RAID5_N_RESTART:
702 layout = ALGORITHM_LEFT_ASYMMETRIC;
703 break;
704 case DDF_RAID5_0_RESTART:
705 layout = ALGORITHM_RIGHT_ASYMMETRIC;
706 break;
707 case DDF_RAID5_N_CONTINUE:
708 layout = ALGORITHM_LEFT_SYMMETRIC;
709 break;
710 default:
711 return err_bad_ddf_layout(conf);
712 }
713 level = 5;
714 break;
715 case DDF_RAID6:
716 switch (conf->rlq) {
717 case DDF_RAID5_N_RESTART:
718 layout = ALGORITHM_ROTATING_N_RESTART;
719 break;
720 case DDF_RAID6_0_RESTART:
721 layout = ALGORITHM_ROTATING_ZERO_RESTART;
722 break;
723 case DDF_RAID5_N_CONTINUE:
724 layout = ALGORITHM_ROTATING_N_CONTINUE;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 }
729 level = 6;
730 break;
731 default:
732 return err_bad_ddf_layout(conf);
733 };
734
735 good:
736 array->level = level;
737 array->layout = layout;
738 array->raid_disks = raiddisks;
739 return 0;
740 }
741
742 static int load_ddf_header(int fd, unsigned long long lba,
743 unsigned long long size,
744 int type,
745 struct ddf_header *hdr, struct ddf_header *anchor)
746 {
747 /* read a ddf header (primary or secondary) from fd/lba
748 * and check that it is consistent with anchor
749 * Need to check:
750 * magic, crc, guid, rev and LBAs; header_type must match 'type',
751 * and everything after header_type must be the same
752 */
753 if (lba >= size-1)
754 return 0;
755
756 if (lseek64(fd, lba<<9, 0) < 0)
757 return 0;
758
759 if (read(fd, hdr, 512) != 512)
760 return 0;
761
762 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
763 pr_err("%s: bad header magic\n", __func__);
764 return 0;
765 }
766 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
767 pr_err("%s: bad CRC\n", __func__);
768 return 0;
769 }
770 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
771 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
772 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
773 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
774 hdr->type != type ||
775 memcmp(anchor->pad2, hdr->pad2, 512 -
776 offsetof(struct ddf_header, pad2)) != 0) {
777 pr_err("%s: header mismatch\n", __func__);
778 return 0;
779 }
780
781 /* Looks good enough to me... */
782 return 1;
783 }
784
785 static void *load_section(int fd, struct ddf_super *super, void *buf,
786 be32 offset_be, be32 len_be, int check)
787 {
788 unsigned long long offset = be32_to_cpu(offset_be);
789 unsigned long long len = be32_to_cpu(len_be);
790 int dofree = (buf == NULL);
791
792 if (check)
793 if (len != 2 && len != 8 && len != 32
794 && len != 128 && len != 512)
795 return NULL;
796
797 if (len > 1024)
798 return NULL;
799 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
800 buf = NULL;
801
802 if (!buf)
803 return NULL;
804
805 if (super->active->type == 1)
806 offset += be64_to_cpu(super->active->primary_lba);
807 else
808 offset += be64_to_cpu(super->active->secondary_lba);
809
810 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
811 if (dofree)
812 free(buf);
813 return NULL;
814 }
815 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
816 if (dofree)
817 free(buf);
818 return NULL;
819 }
820 return buf;
821 }
822
823 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
824 {
825 unsigned long long dsize;
826
827 get_dev_size(fd, NULL, &dsize);
828
829 if (lseek64(fd, dsize-512, 0) < 0) {
830 if (devname)
831 pr_err("Cannot seek to anchor block on %s: %s\n",
832 devname, strerror(errno));
833 return 1;
834 }
835 if (read(fd, &super->anchor, 512) != 512) {
836 if (devname)
837 pr_err("Cannot read anchor block on %s: %s\n",
838 devname, strerror(errno));
839 return 1;
840 }
841 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
842 if (devname)
843 pr_err("no DDF anchor found on %s\n",
844 devname);
845 return 2;
846 }
847 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
848 if (devname)
849 pr_err("bad CRC on anchor on %s\n",
850 devname);
851 return 2;
852 }
853 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
854 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
855 if (devname)
856 pr_err("can only support super revision"
857 " %.8s and earlier, not %.8s on %s\n",
858 DDF_REVISION_2, super->anchor.revision,devname);
859 return 2;
860 }
861 super->active = NULL;
862 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
863 dsize >> 9, 1,
864 &super->primary, &super->anchor) == 0) {
865 if (devname)
866 pr_err("Failed to load primary DDF header "
867 "on %s\n", devname);
868 } else
869 super->active = &super->primary;
870
871 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
872 dsize >> 9, 2,
873 &super->secondary, &super->anchor)) {
874 if (super->active == NULL
875 || (be32_to_cpu(super->primary.seq)
876 < be32_to_cpu(super->secondary.seq) &&
877 !super->secondary.openflag)
878 || (be32_to_cpu(super->primary.seq)
879 == be32_to_cpu(super->secondary.seq) &&
880 super->primary.openflag && !super->secondary.openflag)
881 )
882 super->active = &super->secondary;
883 } else if (devname)
884 pr_err("Failed to load secondary DDF header on %s\n",
885 devname);
886 if (super->active == NULL)
887 return 2;
888 return 0;
889 }
890
891 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
892 {
893 void *ok;
894 ok = load_section(fd, super, &super->controller,
895 super->active->controller_section_offset,
896 super->active->controller_section_length,
897 0);
898 super->phys = load_section(fd, super, NULL,
899 super->active->phys_section_offset,
900 super->active->phys_section_length,
901 1);
902 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
903
904 super->virt = load_section(fd, super, NULL,
905 super->active->virt_section_offset,
906 super->active->virt_section_length,
907 1);
908 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
909 if (!ok ||
910 !super->phys ||
911 !super->virt) {
912 free(super->phys);
913 free(super->virt);
914 super->phys = NULL;
915 super->virt = NULL;
916 return 2;
917 }
918 super->conflist = NULL;
919 super->dlist = NULL;
920
921 super->max_part = be16_to_cpu(super->active->max_partitions);
922 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
923 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
924 return 0;
925 }
926
927 #define DDF_UNUSED_BVD 0xff
928 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
929 {
930 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
931 unsigned int i, vdsize;
932 void *p;
933 if (n_vds == 0) {
934 vcl->other_bvds = NULL;
935 return 0;
936 }
937 vdsize = ddf->conf_rec_len * 512;
938 if (posix_memalign(&p, 512, n_vds *
939 (vdsize + sizeof(struct vd_config *))) != 0)
940 return -1;
941 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
942 for (i = 0; i < n_vds; i++) {
943 vcl->other_bvds[i] = p + i * vdsize;
944 memset(vcl->other_bvds[i], 0, vdsize);
945 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
946 }
947 return 0;
948 }
949
950 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
951 unsigned int len)
952 {
953 int i;
954 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
955 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
956 break;
957
958 if (i < vcl->conf.sec_elmnt_count-1) {
959 if (be32_to_cpu(vd->seqnum) <=
960 be32_to_cpu(vcl->other_bvds[i]->seqnum))
961 return;
962 } else {
963 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
964 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
965 break;
966 if (i == vcl->conf.sec_elmnt_count-1) {
967 pr_err("no space for sec level config %u, count is %u\n",
968 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
969 return;
970 }
971 }
972 memcpy(vcl->other_bvds[i], vd, len);
973 }
974
975 static int load_ddf_local(int fd, struct ddf_super *super,
976 char *devname, int keep)
977 {
978 struct dl *dl;
979 struct stat stb;
980 char *conf;
981 unsigned int i;
982 unsigned int confsec;
983 int vnum;
984 unsigned int max_virt_disks = be16_to_cpu
985 (super->active->max_vd_entries);
986 unsigned long long dsize;
987
988 /* First the local disk info */
989 if (posix_memalign((void**)&dl, 512,
990 sizeof(*dl) +
991 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
992 pr_err("%s could not allocate disk info buffer\n",
993 __func__);
994 return 1;
995 }
996
997 load_section(fd, super, &dl->disk,
998 super->active->data_section_offset,
999 super->active->data_section_length,
1000 0);
1001 dl->devname = devname ? xstrdup(devname) : NULL;
1002
1003 fstat(fd, &stb);
1004 dl->major = major(stb.st_rdev);
1005 dl->minor = minor(stb.st_rdev);
1006 dl->next = super->dlist;
1007 dl->fd = keep ? fd : -1;
1008
1009 dl->size = 0;
1010 if (get_dev_size(fd, devname, &dsize))
1011 dl->size = dsize >> 9;
1012 /* If the disks have different sizes, the LBAs will differ
1013 * between phys disks.
1014 * At this point here, the values in super->active must be valid
1015 * for this phys disk. */
1016 dl->primary_lba = super->active->primary_lba;
1017 dl->secondary_lba = super->active->secondary_lba;
1018 dl->workspace_lba = super->active->workspace_lba;
1019 dl->spare = NULL;
1020 for (i = 0 ; i < super->max_part ; i++)
1021 dl->vlist[i] = NULL;
1022 super->dlist = dl;
1023 dl->pdnum = -1;
1024 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1025 if (memcmp(super->phys->entries[i].guid,
1026 dl->disk.guid, DDF_GUID_LEN) == 0)
1027 dl->pdnum = i;
1028
1029 /* Now the config list. */
1030 /* 'conf' is an array of config entries, some of which are
1031 * probably invalid. Those which are good need to be copied into
1032 * the conflist
1033 */
1034
1035 conf = load_section(fd, super, super->conf,
1036 super->active->config_section_offset,
1037 super->active->config_section_length,
1038 0);
1039 super->conf = conf;
1040 vnum = 0;
1041 for (confsec = 0;
1042 confsec < be32_to_cpu(super->active->config_section_length);
1043 confsec += super->conf_rec_len) {
1044 struct vd_config *vd =
1045 (struct vd_config *)((char*)conf + confsec*512);
1046 struct vcl *vcl;
1047
1048 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1049 if (dl->spare)
1050 continue;
1051 if (posix_memalign((void**)&dl->spare, 512,
1052 super->conf_rec_len*512) != 0) {
1053 pr_err("%s could not allocate spare info buf\n",
1054 __func__);
1055 return 1;
1056 }
1057
1058 memcpy(dl->spare, vd, super->conf_rec_len*512);
1059 continue;
1060 }
1061 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1062 continue;
1063 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1064 if (memcmp(vcl->conf.guid,
1065 vd->guid, DDF_GUID_LEN) == 0)
1066 break;
1067 }
1068
1069 if (vcl) {
1070 dl->vlist[vnum++] = vcl;
1071 if (vcl->other_bvds != NULL &&
1072 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1073 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1074 continue;
1075 }
1076 if (be32_to_cpu(vd->seqnum) <=
1077 be32_to_cpu(vcl->conf.seqnum))
1078 continue;
1079 } else {
1080 if (posix_memalign((void**)&vcl, 512,
1081 (super->conf_rec_len*512 +
1082 offsetof(struct vcl, conf))) != 0) {
1083 pr_err("%s could not allocate vcl buf\n",
1084 __func__);
1085 return 1;
1086 }
1087 vcl->next = super->conflist;
1088 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1089 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1090 if (alloc_other_bvds(super, vcl) != 0) {
1091 pr_err("%s could not allocate other bvds\n",
1092 __func__);
1093 free(vcl);
1094 return 1;
1095 };
1096 super->conflist = vcl;
1097 dl->vlist[vnum++] = vcl;
1098 }
1099 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1100 for (i=0; i < max_virt_disks ; i++)
1101 if (memcmp(super->virt->entries[i].guid,
1102 vcl->conf.guid, DDF_GUID_LEN)==0)
1103 break;
1104 if (i < max_virt_disks)
1105 vcl->vcnum = i;
1106 }
1107
1108 return 0;
1109 }
1110
1111 #ifndef MDASSEMBLE
1112 static int load_super_ddf_all(struct supertype *st, int fd,
1113 void **sbp, char *devname);
1114 #endif
1115
1116 static void free_super_ddf(struct supertype *st);
1117
1118 static int load_super_ddf(struct supertype *st, int fd,
1119 char *devname)
1120 {
1121 unsigned long long dsize;
1122 struct ddf_super *super;
1123 int rv;
1124
1125 if (get_dev_size(fd, devname, &dsize) == 0)
1126 return 1;
1127
1128 if (!st->ignore_hw_compat && test_partition(fd))
1129 /* DDF is not allowed on partitions */
1130 return 1;
1131
1132 /* 32M is a lower bound */
1133 if (dsize <= 32*1024*1024) {
1134 if (devname)
1135 pr_err("%s is too small for ddf: "
1136 "size is %llu sectors.\n",
1137 devname, dsize>>9);
1138 return 1;
1139 }
1140 if (dsize & 511) {
1141 if (devname)
1142 pr_err("%s is an odd size for ddf: "
1143 "size is %llu bytes.\n",
1144 devname, dsize);
1145 return 1;
1146 }
1147
1148 free_super_ddf(st);
1149
1150 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1151 pr_err("malloc of %zu failed.\n",
1152 sizeof(*super));
1153 return 1;
1154 }
1155 memset(super, 0, sizeof(*super));
1156
1157 rv = load_ddf_headers(fd, super, devname);
1158 if (rv) {
1159 free(super);
1160 return rv;
1161 }
1162
1163 /* Have valid headers and have chosen the best. Let's read in the rest */
1164
1165 rv = load_ddf_global(fd, super, devname);
1166
1167 if (rv) {
1168 if (devname)
1169 pr_err("Failed to load all information "
1170 "sections on %s\n", devname);
1171 free(super);
1172 return rv;
1173 }
1174
1175 rv = load_ddf_local(fd, super, devname, 0);
1176
1177 if (rv) {
1178 if (devname)
1179 pr_err("Failed to load all information "
1180 "sections on %s\n", devname);
1181 free(super);
1182 return rv;
1183 }
1184
1185 /* Should possibly check the sections .... */
1186
1187 st->sb = super;
1188 if (st->ss == NULL) {
1189 st->ss = &super_ddf;
1190 st->minor_version = 0;
1191 st->max_devs = 512;
1192 }
1193 return 0;
1194
1195 }
1196
1197 static void free_super_ddf(struct supertype *st)
1198 {
1199 struct ddf_super *ddf = st->sb;
1200 if (ddf == NULL)
1201 return;
1202 free(ddf->phys);
1203 free(ddf->virt);
1204 free(ddf->conf);
1205 while (ddf->conflist) {
1206 struct vcl *v = ddf->conflist;
1207 ddf->conflist = v->next;
1208 if (v->block_sizes)
1209 free(v->block_sizes);
1210 if (v->other_bvds)
1211 /*
1212 v->other_bvds[0] points to beginning of buffer,
1213 see alloc_other_bvds()
1214 */
1215 free(v->other_bvds[0]);
1216 free(v);
1217 }
1218 while (ddf->dlist) {
1219 struct dl *d = ddf->dlist;
1220 ddf->dlist = d->next;
1221 if (d->fd >= 0)
1222 close(d->fd);
1223 if (d->spare)
1224 free(d->spare);
1225 free(d);
1226 }
1227 while (ddf->add_list) {
1228 struct dl *d = ddf->add_list;
1229 ddf->add_list = d->next;
1230 if (d->fd >= 0)
1231 close(d->fd);
1232 if (d->spare)
1233 free(d->spare);
1234 free(d);
1235 }
1236 free(ddf);
1237 st->sb = NULL;
1238 }
1239
1240 static struct supertype *match_metadata_desc_ddf(char *arg)
1241 {
1242 /* 'ddf' only supports containers */
1243 struct supertype *st;
1244 if (strcmp(arg, "ddf") != 0 &&
1245 strcmp(arg, "default") != 0
1246 )
1247 return NULL;
1248
1249 st = xcalloc(1, sizeof(*st));
1250 st->ss = &super_ddf;
1251 st->max_devs = 512;
1252 st->minor_version = 0;
1253 st->sb = NULL;
1254 return st;
1255 }
1256
1257 #ifndef MDASSEMBLE
1258
1259 static mapping_t ddf_state[] = {
1260 { "Optimal", 0},
1261 { "Degraded", 1},
1262 { "Deleted", 2},
1263 { "Missing", 3},
1264 { "Failed", 4},
1265 { "Partially Optimal", 5},
1266 { "-reserved-", 6},
1267 { "-reserved-", 7},
1268 { NULL, 0}
1269 };
1270
1271 static mapping_t ddf_init_state[] = {
1272 { "Not Initialised", 0},
1273 { "QuickInit in Progress", 1},
1274 { "Fully Initialised", 2},
1275 { "*UNKNOWN*", 3},
1276 { NULL, 0}
1277 };
1278 static mapping_t ddf_access[] = {
1279 { "Read/Write", 0},
1280 { "Reserved", 1},
1281 { "Read Only", 2},
1282 { "Blocked (no access)", 3},
1283 { NULL ,0}
1284 };
1285
1286 static mapping_t ddf_level[] = {
1287 { "RAID0", DDF_RAID0},
1288 { "RAID1", DDF_RAID1},
1289 { "RAID3", DDF_RAID3},
1290 { "RAID4", DDF_RAID4},
1291 { "RAID5", DDF_RAID5},
1292 { "RAID1E",DDF_RAID1E},
1293 { "JBOD", DDF_JBOD},
1294 { "CONCAT",DDF_CONCAT},
1295 { "RAID5E",DDF_RAID5E},
1296 { "RAID5EE",DDF_RAID5EE},
1297 { "RAID6", DDF_RAID6},
1298 { NULL, 0}
1299 };
1300 static mapping_t ddf_sec_level[] = {
1301 { "Striped", DDF_2STRIPED},
1302 { "Mirrored", DDF_2MIRRORED},
1303 { "Concat", DDF_2CONCAT},
1304 { "Spanned", DDF_2SPANNED},
1305 { NULL, 0}
1306 };
1307 #endif
1308
1309 static int all_ff(const char *guid)
1310 {
1311 int i;
1312 for (i = 0; i < DDF_GUID_LEN; i++)
1313 if (guid[i] != (char)0xff)
1314 return 0;
1315 return 1;
1316 }
1317
1318 static const char *guid_str(const char *guid)
1319 {
1320 static char buf[DDF_GUID_LEN*2+1];
1321 int i;
1322 char *p = buf;
1323 for (i = 0; i < DDF_GUID_LEN; i++) {
1324 unsigned char c = guid[i];
1325 if (c >= 32 && c < 127)
1326 p += sprintf(p, "%c", c);
1327 else
1328 p += sprintf(p, "%02x", c);
1329 }
1330 *p = '\0';
1331 return (const char *) buf;
1332 }
1333
1334 #ifndef MDASSEMBLE
1335 static void print_guid(char *guid, int tstamp)
1336 {
1337 /* GUIDs are partly (or wholly) ASCII and partly binary.
1338 * They tend to be space padded.
1339 * We print the GUID in HEX, then in parentheses add
1340 * any initial ASCII sequence, and a possible
1341 * time stamp from bytes 16-19
1342 */
1343 int l = DDF_GUID_LEN;
1344 int i;
1345
1346 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1347 if ((i&3)==0 && i != 0) printf(":");
1348 printf("%02X", guid[i]&255);
1349 }
1350
1351 printf("\n (");
1352 while (l && guid[l-1] == ' ')
1353 l--;
1354 for (i=0 ; i<l ; i++) {
1355 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1356 fputc(guid[i], stdout);
1357 else
1358 break;
1359 }
1360 if (tstamp) {
1361 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1362 char tbuf[100];
1363 struct tm *tm;
1364 tm = localtime(&then);
1365 strftime(tbuf, 100, " %D %T",tm);
1366 fputs(tbuf, stdout);
1367 }
1368 printf(")");
1369 }
1370
1371 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1372 {
1373 int crl = sb->conf_rec_len;
1374 struct vcl *vcl;
1375
1376 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1377 unsigned int i;
1378 struct vd_config *vc = &vcl->conf;
1379
1380 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1381 continue;
1382 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1383 continue;
1384
1385 /* Ok, we know about this VD, let's give more details */
1386 printf(" Raid Devices[%d] : %d (", n,
1387 be16_to_cpu(vc->prim_elmnt_count));
1388 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1389 int j;
1390 int cnt = be16_to_cpu(sb->phys->used_pdes);
1391 for (j=0; j<cnt; j++)
1392 if (be32_eq(vc->phys_refnum[i],
1393 sb->phys->entries[j].refnum))
1394 break;
1395 if (i) printf(" ");
1396 if (j < cnt)
1397 printf("%d", j);
1398 else
1399 printf("--");
1400 }
1401 printf(")\n");
1402 if (vc->chunk_shift != 255)
1403 printf(" Chunk Size[%d] : %d sectors\n", n,
1404 1 << vc->chunk_shift);
1405 printf(" Raid Level[%d] : %s\n", n,
1406 map_num(ddf_level, vc->prl)?:"-unknown-");
1407 if (vc->sec_elmnt_count != 1) {
1408 printf(" Secondary Position[%d] : %d of %d\n", n,
1409 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1410 printf(" Secondary Level[%d] : %s\n", n,
1411 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1412 }
1413 printf(" Device Size[%d] : %llu\n", n,
1414 be64_to_cpu(vc->blocks)/2);
1415 printf(" Array Size[%d] : %llu\n", n,
1416 be64_to_cpu(vc->array_blocks)/2);
1417 }
1418 }
1419
1420 static void examine_vds(struct ddf_super *sb)
1421 {
1422 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1423 unsigned int i;
1424 printf(" Virtual Disks : %d\n", cnt);
1425
1426 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1427 struct virtual_entry *ve = &sb->virt->entries[i];
1428 if (all_ff(ve->guid))
1429 continue;
1430 printf("\n");
1431 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1432 printf("\n");
1433 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1434 printf(" state[%d] : %s, %s%s\n", i,
1435 map_num(ddf_state, ve->state & 7),
1436 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1437 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1438 printf(" init state[%d] : %s\n", i,
1439 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1440 printf(" access[%d] : %s\n", i,
1441 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1442 printf(" Name[%d] : %.16s\n", i, ve->name);
1443 examine_vd(i, sb, ve->guid);
1444 }
1445 if (cnt) printf("\n");
1446 }
1447
1448 static void examine_pds(struct ddf_super *sb)
1449 {
1450 int cnt = be16_to_cpu(sb->phys->used_pdes);
1451 int i;
1452 struct dl *dl;
1453 printf(" Physical Disks : %d\n", cnt);
1454 printf(" Number RefNo Size Device Type/State\n");
1455
1456 for (i=0 ; i<cnt ; i++) {
1457 struct phys_disk_entry *pd = &sb->phys->entries[i];
1458 int type = be16_to_cpu(pd->type);
1459 int state = be16_to_cpu(pd->state);
1460
1461 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1462 //printf("\n");
1463 printf(" %3d %08x ", i,
1464 be32_to_cpu(pd->refnum));
1465 printf("%8lluK ",
1466 be64_to_cpu(pd->config_size)>>1);
1467 for (dl = sb->dlist; dl ; dl = dl->next) {
1468 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1469 char *dv = map_dev(dl->major, dl->minor, 0);
1470 if (dv) {
1471 printf("%-15s", dv);
1472 break;
1473 }
1474 }
1475 }
1476 if (!dl)
1477 printf("%15s","");
1478 printf(" %s%s%s%s%s",
1479 (type&2) ? "active":"",
1480 (type&4) ? "Global-Spare":"",
1481 (type&8) ? "spare" : "",
1482 (type&16)? ", foreign" : "",
1483 (type&32)? "pass-through" : "");
1484 if (state & DDF_Failed)
1485 /* This over-rides these three */
1486 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1487 printf("/%s%s%s%s%s%s%s",
1488 (state&1)? "Online": "Offline",
1489 (state&2)? ", Failed": "",
1490 (state&4)? ", Rebuilding": "",
1491 (state&8)? ", in-transition": "",
1492 (state&16)? ", SMART-errors": "",
1493 (state&32)? ", Unrecovered-Read-Errors": "",
1494 (state&64)? ", Missing" : "");
1495 printf("\n");
1496 }
1497 }
1498
1499 static void examine_super_ddf(struct supertype *st, char *homehost)
1500 {
1501 struct ddf_super *sb = st->sb;
1502
1503 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1504 printf(" Version : %.8s\n", sb->anchor.revision);
1505 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1506 printf("\n");
1507 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1508 printf("\n");
1509 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1510 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1511 DDF_HEADER_MAGIC)
1512 ?"yes" : "no");
1513 examine_vds(sb);
1514 examine_pds(sb);
1515 }
1516
1517 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1518
1519 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1520 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1521
1522 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1523 {
1524 /*
1525 * Figure out the VD number for this supertype.
1526 * Returns DDF_CONTAINER for the container itself,
1527 * and DDF_NOTFOUND on error.
1528 */
1529 struct ddf_super *ddf = st->sb;
1530 struct mdinfo *sra;
1531 char *sub, *end;
1532 unsigned int vcnum;
1533
1534 if (*st->container_devnm == '\0')
1535 return DDF_CONTAINER;
1536
1537 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1538 if (!sra || sra->array.major_version != -1 ||
1539 sra->array.minor_version != -2 ||
1540 !is_subarray(sra->text_version))
1541 return DDF_NOTFOUND;
1542
1543 sub = strchr(sra->text_version + 1, '/');
1544 if (sub != NULL)
1545 vcnum = strtoul(sub + 1, &end, 10);
1546 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1547 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1548 return DDF_NOTFOUND;
1549
1550 return vcnum;
1551 }
1552
1553 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1554 {
1555 /* We just write a generic DDF ARRAY entry
1556 */
1557 struct mdinfo info;
1558 char nbuf[64];
1559 getinfo_super_ddf(st, &info, NULL);
1560 fname_from_uuid(st, &info, nbuf, ':');
1561
1562 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1563 }
1564
1565 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1566 {
1567 /* We just write a generic DDF ARRAY entry
1568 */
1569 struct ddf_super *ddf = st->sb;
1570 struct mdinfo info;
1571 unsigned int i;
1572 char nbuf[64];
1573 getinfo_super_ddf(st, &info, NULL);
1574 fname_from_uuid(st, &info, nbuf, ':');
1575
1576 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1577 struct virtual_entry *ve = &ddf->virt->entries[i];
1578 struct vcl vcl;
1579 char nbuf1[64];
1580 if (all_ff(ve->guid))
1581 continue;
1582 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1583 ddf->currentconf =&vcl;
1584 vcl.vcnum = i;
1585 uuid_from_super_ddf(st, info.uuid);
1586 fname_from_uuid(st, &info, nbuf1, ':');
1587 printf("ARRAY container=%s member=%d UUID=%s\n",
1588 nbuf+5, i, nbuf1+5);
1589 }
1590 }
1591
1592 static void export_examine_super_ddf(struct supertype *st)
1593 {
1594 struct mdinfo info;
1595 char nbuf[64];
1596 getinfo_super_ddf(st, &info, NULL);
1597 fname_from_uuid(st, &info, nbuf, ':');
1598 printf("MD_METADATA=ddf\n");
1599 printf("MD_LEVEL=container\n");
1600 printf("MD_UUID=%s\n", nbuf+5);
1601 printf("MD_DEVICES=%u\n",
1602 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1603 }
1604
1605 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1606 {
1607 void *buf;
1608 unsigned long long dsize, offset;
1609 int bytes;
1610 struct ddf_header *ddf;
1611 int written = 0;
1612
1613 /* The meta consists of an anchor, a primary, and a secondary.
1614 * This all lives at the end of the device.
1615 * So it is easiest to find the earliest of primary and
1616 * secondary, and copy everything from there.
1617 *
1618 * The anchor is 512 bytes from the end. It contains primary_lba and
1619 * secondary_lba; we start copying from the earlier of those.
1620 */
1621
1622 if (posix_memalign(&buf, 4096, 4096) != 0)
1623 return 1;
1624
1625 if (!get_dev_size(from, NULL, &dsize))
1626 goto err;
1627
1628 if (lseek64(from, dsize-512, 0) < 0)
1629 goto err;
1630 if (read(from, buf, 512) != 512)
1631 goto err;
1632 ddf = buf;
1633 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1634 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1635 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1636 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1637 goto err;
1638
1639 offset = dsize - 512;
1640 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1641 offset = be64_to_cpu(ddf->primary_lba) << 9;
1642 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1643 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1644
1645 bytes = dsize - offset;
1646
1647 if (lseek64(from, offset, 0) < 0 ||
1648 lseek64(to, offset, 0) < 0)
1649 goto err;
1650 while (written < bytes) {
1651 int n = bytes - written;
1652 if (n > 4096)
1653 n = 4096;
1654 if (read(from, buf, n) != n)
1655 goto err;
1656 if (write(to, buf, n) != n)
1657 goto err;
1658 written += n;
1659 }
1660 free(buf);
1661 return 0;
1662 err:
1663 free(buf);
1664 return 1;
1665 }
1666
1667 static void detail_super_ddf(struct supertype *st, char *homehost)
1668 {
1669 /* FIXME later
1670 * Could print DDF GUID
1671 * Need to find which array
1672 * If whole, briefly list all arrays
1673 * If one, give name
1674 */
1675 }
1676
1677 static const char *vendors_with_variable_volume_UUID[] = {
1678 "LSI ",
1679 };
1680
1681 static int volume_id_is_reliable(const struct ddf_super *ddf)
1682 {
1683 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1684 int i;
1685 for (i = 0; i < n; i++)
1686 if (!memcmp(ddf->controller.guid,
1687 vendors_with_variable_volume_UUID[i], 8))
1688 return 0;
1689 return 1;
1690 }
1691
1692 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1693 unsigned int vcnum, int uuid[4])
1694 {
1695 char buf[DDF_GUID_LEN+18], sha[20], *p;
1696 struct sha1_ctx ctx;
1697 if (volume_id_is_reliable(ddf)) {
1698 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1699 return;
1700 }
1701 /*
1702 * Some fake RAID BIOSes (in particular, LSI ones) change the
1703 * VD GUID at every boot. These GUIDs are not suitable for
1704 * identifying an array. Luckily the header GUID appears to
1705 * remain constant.
1706 * We construct a pseudo-UUID from the header GUID and those
1707 * properties of the subarray that we expect to remain constant.
1708 */
1709 memset(buf, 0, sizeof(buf));
1710 p = buf;
1711 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1712 p += DDF_GUID_LEN;
1713 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1714 p += 16;
1715 *((__u16 *) p) = vcnum;
1716 sha1_init_ctx(&ctx);
1717 sha1_process_bytes(buf, sizeof(buf), &ctx);
1718 sha1_finish_ctx(&ctx, sha);
1719 memcpy(uuid, sha, 4*4);
1720 }
1721
1722 static void brief_detail_super_ddf(struct supertype *st)
1723 {
1724 struct mdinfo info;
1725 char nbuf[64];
1726 struct ddf_super *ddf = st->sb;
1727 unsigned int vcnum = get_vd_num_of_subarray(st);
1728 if (vcnum == DDF_CONTAINER)
1729 uuid_from_super_ddf(st, info.uuid);
1730 else if (vcnum == DDF_NOTFOUND)
1731 return;
1732 else
1733 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1734 fname_from_uuid(st, &info, nbuf,':');
1735 printf(" UUID=%s", nbuf + 5);
1736 }
1737 #endif
1738
1739 static int match_home_ddf(struct supertype *st, char *homehost)
1740 {
1741 /* It matches 'this' host if the controller is a
1742 * Linux-MD controller with vendor_data matching
1743 * the hostname
1744 */
1745 struct ddf_super *ddf = st->sb;
1746 unsigned int len;
1747
1748 if (!homehost)
1749 return 0;
1750 len = strlen(homehost);
1751
1752 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1753 len < sizeof(ddf->controller.vendor_data) &&
1754 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1755 ddf->controller.vendor_data[len] == 0);
1756 }
1757
1758 #ifndef MDASSEMBLE
1759 static int find_index_in_bvd(const struct ddf_super *ddf,
1760 const struct vd_config *conf, unsigned int n,
1761 unsigned int *n_bvd)
1762 {
1763 /*
1764 * Find the index of the n-th valid physical disk in this BVD
1765 */
1766 unsigned int i, j;
1767 for (i = 0, j = 0; i < ddf->mppe &&
1768 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1769 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1770 if (n == j) {
1771 *n_bvd = i;
1772 return 1;
1773 }
1774 j++;
1775 }
1776 }
1777 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1778 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1779 return 0;
1780 }
1781
1782 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1783 unsigned int n,
1784 unsigned int *n_bvd, struct vcl **vcl)
1785 {
1786 struct vcl *v;
1787
1788 for (v = ddf->conflist; v; v = v->next) {
1789 unsigned int nsec, ibvd = 0;
1790 struct vd_config *conf;
1791 if (inst != v->vcnum)
1792 continue;
1793 conf = &v->conf;
1794 if (conf->sec_elmnt_count == 1) {
1795 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1796 *vcl = v;
1797 return conf;
1798 } else
1799 goto bad;
1800 }
1801 if (v->other_bvds == NULL) {
1802 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1803 __func__, conf->sec_elmnt_count);
1804 goto bad;
1805 }
1806 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1807 if (conf->sec_elmnt_seq != nsec) {
1808 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1809 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1810 == nsec)
1811 break;
1812 }
1813 if (ibvd == conf->sec_elmnt_count)
1814 goto bad;
1815 conf = v->other_bvds[ibvd-1];
1816 }
1817 if (!find_index_in_bvd(ddf, conf,
1818 n - nsec*conf->sec_elmnt_count, n_bvd))
1819 goto bad;
1820 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1821 , __func__, n, *n_bvd, ibvd, inst);
1822 *vcl = v;
1823 return conf;
1824 }
1825 bad:
1826 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1827 return NULL;
1828 }
1829 #endif
1830
1831 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1832 {
1833 /* Find the entry in phys_disk which has the given refnum
1834 * and return its index
1835 */
1836 unsigned int i;
1837 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1838 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1839 return i;
1840 return -1;
1841 }
1842
1843 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1844 {
1845 char buf[20];
1846 struct sha1_ctx ctx;
1847 sha1_init_ctx(&ctx);
1848 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1849 sha1_finish_ctx(&ctx, buf);
1850 memcpy(uuid, buf, 4*4);
1851 }
1852
1853 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1854 {
1855 /* The uuid returned here is used for:
1856 * uuid to put into bitmap file (Create, Grow)
1857 * uuid for backup header when saving critical section (Grow)
1858 * comparing uuids when re-adding a device into an array
1859 * In these cases the uuid required is that of the data-array,
1860 * not the device-set.
1861 * uuid to recognise same set when adding a missing device back
1862 * to an array. This is a uuid for the device-set.
1863 *
1864 * For each of these we can make do with a truncated
1865 * or hashed uuid rather than the original, as long as
1866 * everyone agrees.
1867 * In the case of SVD we assume the BVD is of interest,
1868 * though that might not be the case if a bitmap were made for
1869 * a mirrored SVD - worry about that later.
1870 * So we need to find the VD configuration record for the
1871 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1872 * The first 16 bytes of the sha1 of these is used.
1873 */
1874 struct ddf_super *ddf = st->sb;
1875 struct vcl *vcl = ddf->currentconf;
1876
1877 if (vcl)
1878 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1879 else
1880 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1881 }
1882
1883 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1884
1885 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1886 {
1887 struct ddf_super *ddf = st->sb;
1888 int map_disks = info->array.raid_disks;
1889 __u32 *cptr;
1890
1891 if (ddf->currentconf) {
1892 getinfo_super_ddf_bvd(st, info, map);
1893 return;
1894 }
1895 memset(info, 0, sizeof(*info));
1896
1897 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1898 info->array.level = LEVEL_CONTAINER;
1899 info->array.layout = 0;
1900 info->array.md_minor = -1;
1901 cptr = (__u32 *)(ddf->anchor.guid + 16);
1902 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1903
1904 info->array.utime = 0;
1905 info->array.chunk_size = 0;
1906 info->container_enough = 1;
1907
1908 info->disk.major = 0;
1909 info->disk.minor = 0;
1910 if (ddf->dlist) {
1911 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1912 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1913
1914 info->data_offset = be64_to_cpu(ddf->phys->
1915 entries[info->disk.raid_disk].
1916 config_size);
1917 info->component_size = ddf->dlist->size - info->data_offset;
1918 } else {
1919 info->disk.number = -1;
1920 info->disk.raid_disk = -1;
1921 // info->disk.raid_disk = find refnum in the table and use index;
1922 }
1923 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1924
1925 info->recovery_start = MaxSector;
1926 info->reshape_active = 0;
1927 info->recovery_blocked = 0;
1928 info->name[0] = 0;
1929
1930 info->array.major_version = -1;
1931 info->array.minor_version = -2;
1932 strcpy(info->text_version, "ddf");
1933 info->safe_mode_delay = 0;
1934
1935 uuid_from_super_ddf(st, info->uuid);
1936
1937 if (map) {
1938 int i;
1939 for (i = 0 ; i < map_disks; i++) {
1940 if (i < info->array.raid_disks &&
1941 (be16_to_cpu(ddf->phys->entries[i].state)
1942 & DDF_Online) &&
1943 !(be16_to_cpu(ddf->phys->entries[i].state)
1944 & DDF_Failed))
1945 map[i] = 1;
1946 else
1947 map[i] = 0;
1948 }
1949 }
1950 }
1951
1952 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1953 {
1954 struct ddf_super *ddf = st->sb;
1955 struct vcl *vc = ddf->currentconf;
1956 int cd = ddf->currentdev;
1957 int n_prim;
1958 int j;
1959 struct dl *dl;
1960 int map_disks = info->array.raid_disks;
1961 __u32 *cptr;
1962 struct vd_config *conf;
1963
1964 memset(info, 0, sizeof(*info));
1965 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1966 return;
1967 info->array.md_minor = -1;
1968 cptr = (__u32 *)(vc->conf.guid + 16);
1969 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1970 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1971 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1972 info->custom_array_size = 0;
1973
1974 conf = &vc->conf;
1975 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1976 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1977 int ibvd = cd / n_prim - 1;
1978 cd %= n_prim;
1979 conf = vc->other_bvds[ibvd];
1980 }
1981
1982 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1983 info->data_offset =
1984 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1985 if (vc->block_sizes)
1986 info->component_size = vc->block_sizes[cd];
1987 else
1988 info->component_size = be64_to_cpu(conf->blocks);
1989 }
1990
1991 for (dl = ddf->dlist; dl ; dl = dl->next)
1992 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1993 break;
1994
1995 info->disk.major = 0;
1996 info->disk.minor = 0;
1997 info->disk.state = 0;
1998 if (dl) {
1999 info->disk.major = dl->major;
2000 info->disk.minor = dl->minor;
2001 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2002 * be16_to_cpu(conf->prim_elmnt_count);
2003 info->disk.number = dl->pdnum;
2004 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2005 }
2006
2007 info->container_member = ddf->currentconf->vcnum;
2008
2009 info->recovery_start = MaxSector;
2010 info->resync_start = 0;
2011 info->reshape_active = 0;
2012 info->recovery_blocked = 0;
2013 if (!(ddf->virt->entries[info->container_member].state
2014 & DDF_state_inconsistent) &&
2015 (ddf->virt->entries[info->container_member].init_state
2016 & DDF_initstate_mask)
2017 == DDF_init_full)
2018 info->resync_start = MaxSector;
2019
2020 uuid_from_super_ddf(st, info->uuid);
2021
2022 info->array.major_version = -1;
2023 info->array.minor_version = -2;
2024 sprintf(info->text_version, "/%s/%d",
2025 st->container_devnm,
2026 info->container_member);
2027 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2028
2029 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
2030 info->name[16]=0;
2031 for(j=0; j<16; j++)
2032 if (info->name[j] == ' ')
2033 info->name[j] = 0;
2034
2035 if (map)
2036 for (j = 0; j < map_disks; j++) {
2037 map[j] = 0;
2038 if (j < info->array.raid_disks) {
2039 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2040 if (i >= 0 &&
2041 (be16_to_cpu(ddf->phys->entries[i].state)
2042 & DDF_Online) &&
2043 !(be16_to_cpu(ddf->phys->entries[i].state)
2044 & DDF_Failed))
2045 map[i] = 1;
2046 }
2047 }
2048 }
2049
2050 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2051 char *update,
2052 char *devname, int verbose,
2053 int uuid_set, char *homehost)
2054 {
2055 /* For 'assemble' and 'force' we need to return non-zero if any
2056 * change was made. For others, the return value is ignored.
2057 * Update options are:
2058 * force-one : This device looks a bit old but needs to be included,
2059 * update age info appropriately.
2060 * assemble: clear any 'faulty' flag to allow this device to
2061 * be assembled.
2062 * force-array: Array is degraded but being forced, mark it clean
2063 * if that will be needed to assemble it.
2064 *
2065 * newdev: not used ????
2066 * grow: Array has gained a new device - this is currently for
2067 * linear only
2068 * resync: mark as dirty so a resync will happen.
2069 * uuid: Change the uuid of the array to match what is given
2070 * homehost: update the recorded homehost
2071 * name: update the name - preserving the homehost
2072 * _reshape_progress: record new reshape_progress position.
2073 *
2074 * Following are not relevant for this version:
2075 * sparc2.2 : update from old dodgy metadata
2076 * super-minor: change the preferred_minor number
2077 * summaries: update redundant counters.
2078 */
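/* Illustrative example only (not an exhaustive contract): a call such as
 *
 *	update_super_ddf(st, info, "name", devname, 0, 0, NULL);
 *
 * is expected to rewrite the space-padded name in the virtual_entry for this
 * subarray; as the stubs below show, "name", "homehost" and most other
 * updates are not implemented yet and simply return -1, while "assemble",
 * "grow", "resync" and "_reshape_progress" return 0 without touching the
 * metadata.
 */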
2079 int rv = 0;
2080 // struct ddf_super *ddf = st->sb;
2081 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2082 // struct virtual_entry *ve = find_ve(ddf);
2083
2084 /* we don't need to handle "force-*" or "assemble" as
2085 * there is no need to 'trick' the kernel. When the metadata is
2086 * first updated to activate the array, all the implied modifications
2087 * will just happen.
2088 */
2089
2090 if (strcmp(update, "grow") == 0) {
2091 /* FIXME */
2092 } else if (strcmp(update, "resync") == 0) {
2093 // info->resync_checkpoint = 0;
2094 } else if (strcmp(update, "homehost") == 0) {
2095 /* homehost is stored in controller->vendor_data,
2096 * or at least it is when we are the vendor
2097 */
2098 // if (info->vendor_is_local)
2099 // strcpy(ddf->controller.vendor_data, homehost);
2100 rv = -1;
2101 } else if (strcmp(update, "name") == 0) {
2102 /* name is stored in virtual_entry->name */
2103 // memset(ve->name, ' ', 16);
2104 // strncpy(ve->name, info->name, 16);
2105 rv = -1;
2106 } else if (strcmp(update, "_reshape_progress") == 0) {
2107 /* We don't support reshape yet */
2108 } else if (strcmp(update, "assemble") == 0 ) {
2109 /* Do nothing, just succeed */
2110 rv = 0;
2111 } else
2112 rv = -1;
2113
2114 // update_all_csum(ddf);
2115
2116 return rv;
2117 }
2118
2119 static void make_header_guid(char *guid)
2120 {
2121 be32 stamp;
2122 /* Create a DDF Header or Virtual Disk GUID */
2123
2124 /* 24 bytes of fiction required.
2125 * first 8 are a 'vendor-id' - "Linux-MD"
2126 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2127 * Remaining 8 are a timestamp plus a random number
2128 */
2129 memcpy(guid, T10, sizeof(T10));
2130 stamp = cpu_to_be32(0xdeadbeef);
2131 memcpy(guid+8, &stamp, 4);
2132 stamp = cpu_to_be32(0);
2133 memcpy(guid+12, &stamp, 4);
2134 stamp = cpu_to_be32(time(0) - DECADE);
2135 memcpy(guid+16, &stamp, 4);
2136 stamp._v32 = random32();
2137 memcpy(guid+20, &stamp, 4);
2138 }
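/* Resulting layout of the 24 bytes written above (descriptive only):
 *	bytes  0- 7: "Linux-MD"                (the T10 vendor-id)
 *	bytes  8-11: 0xde 0xad 0xbe 0xef       (fake controller type)
 *	bytes 12-15: 0x00 0x00 0x00 0x00
 *	bytes 16-19: creation time in seconds since 1980, big-endian
 *	bytes 20-23: random32(), raw
 */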
2139
2140 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2141 {
2142 unsigned int i;
2143 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2144 if (all_ff(ddf->virt->entries[i].guid))
2145 return i;
2146 }
2147 return DDF_NOTFOUND;
2148 }
2149
2150 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2151 const char *name)
2152 {
2153 unsigned int i;
2154 if (name == NULL)
2155 return DDF_NOTFOUND;
2156 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2157 if (all_ff(ddf->virt->entries[i].guid))
2158 continue;
2159 if (!strncmp(name, ddf->virt->entries[i].name,
2160 sizeof(ddf->virt->entries[i].name)))
2161 return i;
2162 }
2163 return DDF_NOTFOUND;
2164 }
2165
2166 #ifndef MDASSEMBLE
2167 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2168 const char *guid)
2169 {
2170 unsigned int i;
2171 if (guid == NULL || all_ff(guid))
2172 return DDF_NOTFOUND;
2173 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2174 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2175 return i;
2176 return DDF_NOTFOUND;
2177 }
2178 #endif
2179
2180 static int init_super_ddf_bvd(struct supertype *st,
2181 mdu_array_info_t *info,
2182 unsigned long long size,
2183 char *name, char *homehost,
2184 int *uuid, unsigned long long data_offset);
2185
2186 static int init_super_ddf(struct supertype *st,
2187 mdu_array_info_t *info,
2188 unsigned long long size, char *name, char *homehost,
2189 int *uuid, unsigned long long data_offset)
2190 {
2191 /* This is primarily called by Create when creating a new array.
2192 * We will then get add_to_super called for each component, and then
2193 * write_init_super called to write it out to each device.
2194 * For DDF, Create can create on fresh devices or on a pre-existing
2195 * array.
2196 * To create on a pre-existing array a different method will be called.
2197 * This one is just for fresh drives.
2198 *
2199 * We need to create the entire 'ddf' structure which includes:
2200 * DDF headers - these are easy.
2201 * Controller data - a Sector describing this controller .. not that
2202 * this is a controller exactly.
2203 * Physical Disk Record - one entry per device, so
2204 * leave plenty of space.
2205 * Virtual Disk Records - again, just leave plenty of space.
2206 * This just lists VDs, doesn't give details
2207 * Config records - describes the VDs that use this disk
2208 * DiskData - describes 'this' device.
2209 * BadBlockManagement - empty
2210 * Diag Space - empty
2211 * Vendor Logs - Could we put bitmaps here?
2212 *
2213 */
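/* Rough sketch (for orientation only) of the section layout this function
 * sets up, expressed as sector offsets from each DDF header, using the
 * defaults chosen below (1023 physical disks, 255 virtual disks,
 * mppe = 256, max_part = 64):
 *
 *	+0                  the header itself (1 sector)
 *	+1                  controller data (1 sector)
 *	+2                  physical disk records (pdsize/512 sectors)
 *	+2 + pdsize/512     virtual disk records (vdsize/512 sectors)
 *	...                 config records, conf_rec_len*(max_part+1) sectors
 *	...                 disk data (1 sector)
 *
 * The absolute primary/secondary/workspace LBAs are left as ~0 here and
 * filled in per device later (see _write_super_to_disk()).
 */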
2214 struct ddf_super *ddf;
2215 char hostname[17];
2216 int hostlen;
2217 int max_phys_disks, max_virt_disks;
2218 unsigned long long sector;
2219 int clen;
2220 int i;
2221 int pdsize, vdsize;
2222 struct phys_disk *pd;
2223 struct virtual_disk *vd;
2224
2225 if (data_offset != INVALID_SECTORS) {
2226 pr_err("data-offset not supported by DDF\n");
2227 return 0;
2228 }
2229
2230 if (st->sb)
2231 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2232 data_offset);
2233
2234 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2235 pr_err("%s could not allocate superblock\n", __func__);
2236 return 0;
2237 }
2238 memset(ddf, 0, sizeof(*ddf));
2239 ddf->dlist = NULL; /* no physical disks yet */
2240 ddf->conflist = NULL; /* No virtual disks yet */
2241 st->sb = ddf;
2242
2243 if (info == NULL) {
2244 /* zeroing superblock */
2245 return 0;
2246 }
2247
2248 /* At least 32MB *must* be reserved for the ddf. So let's just
2249 * start 32MB from the end, and put the primary header there.
2250 * Don't do secondary for now.
2251 * We don't know exactly where that will be yet as it could be
2252 * different on each device. So just set up the lengths for now.
2253 *
2254 */
2255
2256 ddf->anchor.magic = DDF_HEADER_MAGIC;
2257 make_header_guid(ddf->anchor.guid);
2258
2259 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2260 ddf->anchor.seq = cpu_to_be32(1);
2261 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2262 ddf->anchor.openflag = 0xFF;
2263 ddf->anchor.foreignflag = 0;
2264 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2265 ddf->anchor.pad0 = 0xff;
2266 memset(ddf->anchor.pad1, 0xff, 12);
2267 memset(ddf->anchor.header_ext, 0xff, 32);
2268 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2269 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2270 ddf->anchor.type = DDF_HEADER_ANCHOR;
2271 memset(ddf->anchor.pad2, 0xff, 3);
2272 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2273 /* Put this at bottom of 32M reserved.. */
2274 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2275 max_phys_disks = 1023; /* Should be enough */
2276 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2277 max_virt_disks = 255;
2278 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2279 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2280 ddf->max_part = 64;
2281 ddf->mppe = 256;
2282 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2283 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2284 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2285 memset(ddf->anchor.pad3, 0xff, 54);
2286 /* the controller section is one sector long, immediately
2287 * after the ddf header */
2288 sector = 1;
2289 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2290 ddf->anchor.controller_section_length = cpu_to_be32(1);
2291 sector += 1;
2292
2293 /* phys is 8 sectors after that */
2294 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2295 sizeof(struct phys_disk_entry)*max_phys_disks,
2296 512);
2297 switch(pdsize/512) {
2298 case 2: case 8: case 32: case 128: case 512: break;
2299 default: abort();
2300 }
2301 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2302 ddf->anchor.phys_section_length =
2303 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2304 sector += pdsize/512;
2305
2306 /* virt is another 32 sectors */
2307 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2308 sizeof(struct virtual_entry) * max_virt_disks,
2309 512);
2310 switch(vdsize/512) {
2311 case 2: case 8: case 32: case 128: case 512: break;
2312 default: abort();
2313 }
2314 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2315 ddf->anchor.virt_section_length =
2316 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2317 sector += vdsize/512;
2318
2319 clen = ddf->conf_rec_len * (ddf->max_part+1);
2320 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2321 ddf->anchor.config_section_length = cpu_to_be32(clen);
2322 sector += clen;
2323
2324 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2325 ddf->anchor.data_section_length = cpu_to_be32(1);
2326 sector += 1;
2327
2328 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2329 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2330 ddf->anchor.diag_space_length = cpu_to_be32(0);
2331 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2332 ddf->anchor.vendor_length = cpu_to_be32(0);
2333 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2334
2335 memset(ddf->anchor.pad4, 0xff, 256);
2336
2337 memcpy(&ddf->primary, &ddf->anchor, 512);
2338 memcpy(&ddf->secondary, &ddf->anchor, 512);
2339
2340 ddf->primary.openflag = 1; /* I guess.. */
2341 ddf->primary.type = DDF_HEADER_PRIMARY;
2342
2343 ddf->secondary.openflag = 1; /* I guess.. */
2344 ddf->secondary.type = DDF_HEADER_SECONDARY;
2345
2346 ddf->active = &ddf->primary;
2347
2348 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2349
2350 /* 24 more bytes of fiction required.
2351 * first 8 are a 'vendor-id' - "Linux-MD"
2352 * Remaining 16 are serial number.... maybe a hostname would do?
2353 */
2354 memcpy(ddf->controller.guid, T10, sizeof(T10));
2355 gethostname(hostname, sizeof(hostname));
2356 hostname[sizeof(hostname) - 1] = 0;
2357 hostlen = strlen(hostname);
2358 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2359 for (i = strlen(T10) ; i+hostlen < 24; i++)
2360 ddf->controller.guid[i] = ' ';
2361
2362 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2363 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2364 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2365 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2366 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2367 memset(ddf->controller.pad, 0xff, 8);
2368 memset(ddf->controller.vendor_data, 0xff, 448);
2369 if (homehost && strlen(homehost) < 440)
2370 strcpy((char*)ddf->controller.vendor_data, homehost);
2371
2372 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2373 pr_err("%s could not allocate pd\n", __func__);
2374 return 0;
2375 }
2376 ddf->phys = pd;
2377 ddf->pdsize = pdsize;
2378
2379 memset(pd, 0xff, pdsize);
2380 memset(pd, 0, sizeof(*pd));
2381 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2382 pd->used_pdes = cpu_to_be16(0);
2383 pd->max_pdes = cpu_to_be16(max_phys_disks);
2384 memset(pd->pad, 0xff, 52);
2385 for (i = 0; i < max_phys_disks; i++)
2386 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2387
2388 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2389 pr_err("%s could not allocate vd\n", __func__);
2390 return 0;
2391 }
2392 ddf->virt = vd;
2393 ddf->vdsize = vdsize;
2394 memset(vd, 0, vdsize);
2395 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2396 vd->populated_vdes = cpu_to_be16(0);
2397 vd->max_vdes = cpu_to_be16(max_virt_disks);
2398 memset(vd->pad, 0xff, 52);
2399
2400 for (i=0; i<max_virt_disks; i++)
2401 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2402
2403 st->sb = ddf;
2404 ddf_set_updates_pending(ddf);
2405 return 1;
2406 }
2407
2408 static int chunk_to_shift(int chunksize)
2409 {
2410 return ffs(chunksize/512)-1;
2411 }
2412
2413 #ifndef MDASSEMBLE
2414 struct extent {
2415 unsigned long long start, size;
2416 };
2417 static int cmp_extent(const void *av, const void *bv)
2418 {
2419 const struct extent *a = av;
2420 const struct extent *b = bv;
2421 if (a->start < b->start)
2422 return -1;
2423 if (a->start > b->start)
2424 return 1;
2425 return 0;
2426 }
2427
2428 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2429 {
2430 /* find a list of used extents on the given physical device
2431 * (dl) of the given ddf.
2432 * Return a malloced array of 'struct extent'
2433
2434 * FIXME ignore DDF_Legacy devices?
2435
2436 */
2437 struct extent *rv;
2438 int n = 0;
2439 unsigned int i;
2440 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2441
2442 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2443 return NULL;
2444
2445 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2446
2447 for (i = 0; i < ddf->max_part; i++) {
2448 const struct vd_config *bvd;
2449 unsigned int ibvd;
2450 struct vcl *v = dl->vlist[i];
2451 if (v == NULL ||
2452 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2453 &bvd, &ibvd) == DDF_NOTFOUND)
2454 continue;
2455 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2456 rv[n].size = be64_to_cpu(bvd->blocks);
2457 n++;
2458 }
2459 qsort(rv, n, sizeof(*rv), cmp_extent);
2460
2461 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2462 rv[n].size = 0;
2463 return rv;
2464 }
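/* Illustrative only - this mirrors how callers further down walk the result
 * of get_extents(); it is not a new interface.  The array is sorted by start
 * and terminated by an entry with size == 0 whose start is the device's
 * config_size, so the gap before each entry is free space on that disk:
 *
 *	struct extent *e = get_extents(ddf, dl);
 *	unsigned long long pos = 0;
 *	int i = 0;
 *	if (e) do {
 *		unsigned long long esize = e[i].start - pos;
 *		pos = e[i].start + e[i].size;
 *		i++;
 *	} while (e[i-1].size);
 *	free(e);
 */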
2465 #endif
2466
2467 static int init_super_ddf_bvd(struct supertype *st,
2468 mdu_array_info_t *info,
2469 unsigned long long size,
2470 char *name, char *homehost,
2471 int *uuid, unsigned long long data_offset)
2472 {
2473 /* We are creating a BVD inside a pre-existing container.
2474 * so st->sb is already set.
2475 * We need to create a new vd_config and a new virtual_entry
2476 */
2477 struct ddf_super *ddf = st->sb;
2478 unsigned int venum, i;
2479 struct virtual_entry *ve;
2480 struct vcl *vcl;
2481 struct vd_config *vc;
2482
2483 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2484 pr_err("This ddf already has an array called %s\n", name);
2485 return 0;
2486 }
2487 venum = find_unused_vde(ddf);
2488 if (venum == DDF_NOTFOUND) {
2489 pr_err("Cannot find spare slot for virtual disk\n");
2490 return 0;
2491 }
2492 ve = &ddf->virt->entries[venum];
2493
2494 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2495 * timestamp, random number
2496 */
2497 make_header_guid(ve->guid);
2498 ve->unit = cpu_to_be16(info->md_minor);
2499 ve->pad0 = 0xFFFF;
2500 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2501 DDF_GUID_LEN);
2502 ve->type = cpu_to_be16(0);
2503 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2504 if (info->state & 1) /* clean */
2505 ve->init_state = DDF_init_full;
2506 else
2507 ve->init_state = DDF_init_not;
2508
2509 memset(ve->pad1, 0xff, 14);
2510 memset(ve->name, ' ', 16);
2511 if (name)
2512 strncpy(ve->name, name, 16);
2513 ddf->virt->populated_vdes =
2514 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2515
2516 /* Now create a new vd_config */
2517 if (posix_memalign((void**)&vcl, 512,
2518 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2519 pr_err("%s could not allocate vd_config\n", __func__);
2520 return 0;
2521 }
2522 vcl->vcnum = venum;
2523 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2524 vc = &vcl->conf;
2525
2526 vc->magic = DDF_VD_CONF_MAGIC;
2527 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2528 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2529 vc->seqnum = cpu_to_be32(1);
2530 memset(vc->pad0, 0xff, 24);
2531 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2532 if (layout_md2ddf(info, vc) == -1 ||
2533 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2534 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2535 __func__, info->level, info->layout, info->raid_disks);
2536 free(vcl);
2537 return 0;
2538 }
2539 vc->sec_elmnt_seq = 0;
2540 if (alloc_other_bvds(ddf, vcl) != 0) {
2541 pr_err("%s could not allocate other bvds\n",
2542 __func__);
2543 free(vcl);
2544 return 0;
2545 }
2546 vc->blocks = cpu_to_be64(info->size * 2);
2547 vc->array_blocks = cpu_to_be64(
2548 calc_array_size(info->level, info->raid_disks, info->layout,
2549 info->chunk_size, info->size*2));
2550 memset(vc->pad1, 0xff, 8);
2551 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2552 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2553 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2554 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2555 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2556 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2557 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2558 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2559 memset(vc->cache_pol, 0, 8);
2560 vc->bg_rate = 0x80;
2561 memset(vc->pad2, 0xff, 3);
2562 memset(vc->pad3, 0xff, 52);
2563 memset(vc->pad4, 0xff, 192);
2564 memset(vc->v0, 0xff, 32);
2565 memset(vc->v1, 0xff, 32);
2566 memset(vc->v2, 0xff, 16);
2567 memset(vc->v3, 0xff, 16);
2568 memset(vc->vendor, 0xff, 32);
2569
2570 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2571 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2572
2573 for (i = 1; i < vc->sec_elmnt_count; i++) {
2574 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2575 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2576 }
2577
2578 vcl->next = ddf->conflist;
2579 ddf->conflist = vcl;
2580 ddf->currentconf = vcl;
2581 ddf_set_updates_pending(ddf);
2582 return 1;
2583 }
2584
2585
2586 #ifndef MDASSEMBLE
2587 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2588
2589 static void add_to_super_ddf_bvd(struct supertype *st,
2590 mdu_disk_info_t *dk, int fd, char *devname)
2591 {
2592 /* fd and devname identify a device with-in the ddf container (st).
2593 * dk identifies a location in the new BVD.
2594 * We need to find suitable free space in that device and update
2595 * the phys_refnum and lba_offset for the newly created vd_config.
2596 * We might also want to update the type in the phys_disk
2597 * section.
2598 *
2599 * Alternatively: fd == -1 and we have already chosen which device to
2600 * use and recorded it in dlist->raid_disk;
2601 */
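/* Worked example (illustrative numbers): for an SVD with
 * prim_elmnt_count == 2 and sec_elmnt_count == 3, a request for
 * dk->raid_disk == 5 is redirected below to other_bvds[5/2 - 1], i.e.
 * other_bvds[1], and becomes element 5 % 2 == 1 within that BVD - the
 * inverse of the mapping used in getinfo_super_ddf_bvd() above.
 */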
2602 struct dl *dl;
2603 struct ddf_super *ddf = st->sb;
2604 struct vd_config *vc;
2605 unsigned int i;
2606 unsigned long long blocks, pos, esize;
2607 struct extent *ex;
2608 unsigned int raid_disk = dk->raid_disk;
2609
2610 if (fd == -1) {
2611 for (dl = ddf->dlist; dl ; dl = dl->next)
2612 if (dl->raiddisk == dk->raid_disk)
2613 break;
2614 } else {
2615 for (dl = ddf->dlist; dl ; dl = dl->next)
2616 if (dl->major == dk->major &&
2617 dl->minor == dk->minor)
2618 break;
2619 }
2620 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2621 return;
2622
2623 vc = &ddf->currentconf->conf;
2624 if (vc->sec_elmnt_count > 1) {
2625 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2626 if (raid_disk >= n)
2627 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2628 raid_disk %= n;
2629 }
2630
2631 ex = get_extents(ddf, dl);
2632 if (!ex)
2633 return;
2634
2635 i = 0; pos = 0;
2636 blocks = be64_to_cpu(vc->blocks);
2637 if (ddf->currentconf->block_sizes)
2638 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2639
2640 do {
2641 esize = ex[i].start - pos;
2642 if (esize >= blocks)
2643 break;
2644 pos = ex[i].start + ex[i].size;
2645 i++;
2646 } while (ex[i-1].size);
2647
2648 free(ex);
2649 if (esize < blocks)
2650 return;
2651
2652 ddf->currentdev = dk->raid_disk;
2653 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2654 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2655
2656 for (i = 0; i < ddf->max_part ; i++)
2657 if (dl->vlist[i] == NULL)
2658 break;
2659 if (i == ddf->max_part)
2660 return;
2661 dl->vlist[i] = ddf->currentconf;
2662
2663 if (fd >= 0)
2664 dl->fd = fd;
2665 if (devname)
2666 dl->devname = devname;
2667
2668 /* Check if we can mark array as optimal yet */
2669 i = ddf->currentconf->vcnum;
2670 ddf->virt->entries[i].state =
2671 (ddf->virt->entries[i].state & ~DDF_state_mask)
2672 | get_svd_state(ddf, ddf->currentconf);
2673 be16_clear(ddf->phys->entries[dl->pdnum].type,
2674 cpu_to_be16(DDF_Global_Spare));
2675 be16_set(ddf->phys->entries[dl->pdnum].type,
2676 cpu_to_be16(DDF_Active_in_VD));
2677 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2678 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2679 ddf->currentconf->vcnum, guid_str(vc->guid),
2680 dk->raid_disk);
2681 ddf_set_updates_pending(ddf);
2682 }
2683
2684 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2685 {
2686 unsigned int i;
2687 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2688 if (all_ff(ddf->phys->entries[i].guid))
2689 return i;
2690 }
2691 return DDF_NOTFOUND;
2692 }
2693
2694 /* add a device to a container, either while creating it or while
2695 * expanding a pre-existing container
2696 */
2697 static int add_to_super_ddf(struct supertype *st,
2698 mdu_disk_info_t *dk, int fd, char *devname,
2699 unsigned long long data_offset)
2700 {
2701 struct ddf_super *ddf = st->sb;
2702 struct dl *dd;
2703 time_t now;
2704 struct tm *tm;
2705 unsigned long long size;
2706 struct phys_disk_entry *pde;
2707 unsigned int n, i;
2708 struct stat stb;
2709 __u32 *tptr;
2710
2711 if (ddf->currentconf) {
2712 add_to_super_ddf_bvd(st, dk, fd, devname);
2713 return 0;
2714 }
2715
2716 /* This is device numbered dk->number. We need to create
2717 * a phys_disk entry and a more detailed disk_data entry.
2718 */
2719 fstat(fd, &stb);
2720 n = find_unused_pde(ddf);
2721 if (n == DDF_NOTFOUND) {
2722 pr_err("%s: No free slot in array, cannot add disk\n",
2723 __func__);
2724 return 1;
2725 }
2726 pde = &ddf->phys->entries[n];
2727 get_dev_size(fd, NULL, &size);
2728 if (size <= 32*1024*1024) {
2729 pr_err("%s: device size must be at least 32MB\n",
2730 __func__);
2731 return 1;
2732 }
2733 size >>= 9;
2734
2735 if (posix_memalign((void**)&dd, 512,
2736 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2737 pr_err("%s could allocate buffer for new disk, aborting\n",
2738 __func__);
2739 return 1;
2740 }
2741 dd->major = major(stb.st_rdev);
2742 dd->minor = minor(stb.st_rdev);
2743 dd->devname = devname;
2744 dd->fd = fd;
2745 dd->spare = NULL;
2746
2747 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2748 now = time(0);
2749 tm = localtime(&now);
2750 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2751 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2752 tptr = (__u32 *)(dd->disk.guid + 16);
2753 *tptr++ = random32();
2754 *tptr = random32();
2755
2756 do {
2757 /* Cannot be bothered finding a CRC of some irrelevant details */
2758 dd->disk.refnum._v32 = random32();
2759 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2760 i > 0; i--)
2761 if (be32_eq(ddf->phys->entries[i-1].refnum,
2762 dd->disk.refnum))
2763 break;
2764 } while (i > 0);
2765
2766 dd->disk.forced_ref = 1;
2767 dd->disk.forced_guid = 1;
2768 memset(dd->disk.vendor, ' ', 32);
2769 memcpy(dd->disk.vendor, "Linux", 5);
2770 memset(dd->disk.pad, 0xff, 442);
2771 for (i = 0; i < ddf->max_part ; i++)
2772 dd->vlist[i] = NULL;
2773
2774 dd->pdnum = n;
2775
2776 if (st->update_tail) {
2777 int len = (sizeof(struct phys_disk) +
2778 sizeof(struct phys_disk_entry));
2779 struct phys_disk *pd;
2780
2781 pd = xmalloc(len);
2782 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2783 pd->used_pdes = cpu_to_be16(n);
2784 pde = &pd->entries[0];
2785 dd->mdupdate = pd;
2786 } else
2787 ddf->phys->used_pdes = cpu_to_be16(
2788 1 + be16_to_cpu(ddf->phys->used_pdes));
2789
2790 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2791 pde->refnum = dd->disk.refnum;
2792 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2793 pde->state = cpu_to_be16(DDF_Online);
2794 dd->size = size;
2795 /*
2796 * If there is already a device in dlist, try to reserve the same
2797 * amount of workspace. Otherwise, use 32MB.
2798 * We checked disk size above already.
2799 */
2800 #define __calc_lba(new, old, lba, mb) do { \
2801 unsigned long long dif; \
2802 if ((old) != NULL) \
2803 dif = (old)->size - be64_to_cpu((old)->lba); \
2804 else \
2805 dif = (new)->size; \
2806 if ((new)->size > dif) \
2807 (new)->lba = cpu_to_be64((new)->size - dif); \
2808 else \
2809 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2810 } while (0)
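/* Worked example (illustrative numbers): for the first disk added to a
 * fresh container ddf->dlist is NULL, so dif == new->size and each lba
 * falls back to "mb" megabytes below the end of the device; e.g. on a
 * 209715200-sector (100GiB) disk the three calls below give
 *	workspace_lba = 209715200 - 32*1024*2 = 209649664
 *	primary_lba   = 209715200 - 16*1024*2 = 209682432
 *	secondary_lba = 209715200 - 32*1024*2 = 209649664
 * A later disk instead copies the amount of space the first one reserved.
 */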
2811 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2812 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2813 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2814 pde->config_size = dd->workspace_lba;
2815
2816 sprintf(pde->path, "%17.17s","Information: nil") ;
2817 memset(pde->pad, 0xff, 6);
2818
2819 if (st->update_tail) {
2820 dd->next = ddf->add_list;
2821 ddf->add_list = dd;
2822 } else {
2823 dd->next = ddf->dlist;
2824 ddf->dlist = dd;
2825 ddf_set_updates_pending(ddf);
2826 }
2827
2828 return 0;
2829 }
2830
2831 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2832 {
2833 struct ddf_super *ddf = st->sb;
2834 struct dl *dl;
2835
2836 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2837 * disappeared from the container.
2838 * We need to arrange that it disappears from the metadata and
2839 * internal data structures too.
2840 * Most of the work is done by ddf_process_update which edits
2841 * the metadata, closes the file handle, and attaches the memory
2842 * to the update so that free_updates will free it.
2843 */
2844 for (dl = ddf->dlist; dl ; dl = dl->next)
2845 if (dl->major == dk->major &&
2846 dl->minor == dk->minor)
2847 break;
2848 if (!dl)
2849 return -1;
2850
2851 if (st->update_tail) {
2852 int len = (sizeof(struct phys_disk) +
2853 sizeof(struct phys_disk_entry));
2854 struct phys_disk *pd;
2855
2856 pd = xmalloc(len);
2857 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2858 pd->used_pdes = cpu_to_be16(dl->pdnum);
2859 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2860 append_metadata_update(st, pd, len);
2861 }
2862 return 0;
2863 }
2864 #endif
2865
2866 /*
2867 * This is the write_init_super method for a ddf container. It is
2868 * called when creating a container or adding another device to a
2869 * container.
2870 */
2871 #define NULL_CONF_SZ 4096
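/* Rough outline (descriptive, not normative) of what __write_ddf_structure()
 * emits for one header copy, in the order the sections were laid out by
 * init_super_ddf(): the header itself (openflag set while writing),
 * controller data, physical disk records, virtual disk records, one config
 * record per partition slot plus one for the spare assignment record, the
 * physical disk data, and finally the header again with openflag cleared.
 */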
2872
2873 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2874 {
2875 unsigned long long sector;
2876 struct ddf_header *header;
2877 int fd, i, n_config, conf_size, buf_size;
2878 int ret = 0;
2879 char *conf;
2880
2881 fd = d->fd;
2882
2883 switch (type) {
2884 case DDF_HEADER_PRIMARY:
2885 header = &ddf->primary;
2886 sector = be64_to_cpu(header->primary_lba);
2887 break;
2888 case DDF_HEADER_SECONDARY:
2889 header = &ddf->secondary;
2890 sector = be64_to_cpu(header->secondary_lba);
2891 break;
2892 default:
2893 return 0;
2894 }
2895
2896 header->type = type;
2897 header->openflag = 1;
2898 header->crc = calc_crc(header, 512);
2899
2900 lseek64(fd, sector<<9, 0);
2901 if (write(fd, header, 512) < 0)
2902 goto out;
2903
2904 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2905 if (write(fd, &ddf->controller, 512) < 0)
2906 goto out;
2907
2908 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2909 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2910 goto out;
2911 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2912 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2913 goto out;
2914
2915 /* Now write lots of config records. */
2916 n_config = ddf->max_part;
2917 conf_size = ddf->conf_rec_len * 512;
2918 conf = ddf->conf;
2919 buf_size = conf_size * (n_config + 1);
2920 if (!conf) {
2921 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2922 goto out;
2923 ddf->conf = conf;
2924 }
2925 for (i = 0 ; i <= n_config ; i++) {
2926 struct vcl *c;
2927 struct vd_config *vdc = NULL;
2928 if (i == n_config) {
2929 c = (struct vcl *)d->spare;
2930 if (c)
2931 vdc = &c->conf;
2932 } else {
2933 unsigned int dummy;
2934 c = d->vlist[i];
2935 if (c)
2936 get_pd_index_from_refnum(
2937 c, d->disk.refnum,
2938 ddf->mppe,
2939 (const struct vd_config **)&vdc,
2940 &dummy);
2941 }
2942 if (c) {
2943 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2944 i, be32_to_cpu(d->disk.refnum),
2945 guid_str(vdc->guid),
2946 vdc->sec_elmnt_seq);
2947 vdc->seqnum = header->seq;
2948 vdc->crc = calc_crc(vdc, conf_size);
2949 memcpy(conf + i*conf_size, vdc, conf_size);
2950 } else
2951 memset(conf + i*conf_size, 0xff, conf_size);
2952 }
2953 if (write(fd, conf, buf_size) != buf_size)
2954 goto out;
2955
2956 d->disk.crc = calc_crc(&d->disk, 512);
2957 if (write(fd, &d->disk, 512) < 0)
2958 goto out;
2959
2960 ret = 1;
2961 out:
2962 header->openflag = 0;
2963 header->crc = calc_crc(header, 512);
2964
2965 lseek64(fd, sector<<9, 0);
2966 if (write(fd, header, 512) < 0)
2967 ret = 0;
2968
2969 return ret;
2970 }
2971
2972 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2973 {
2974 unsigned long long size;
2975 int fd = d->fd;
2976 if (fd < 0)
2977 return 0;
2978
2979 /* We need to fill in the primary, (secondary) and workspace
2980 * lba's in the headers, set their checksums,
2981 * Also checksum phys, virt....
2982 *
2983 * Then write everything out, finally the anchor is written.
2984 */
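/* Note (descriptive): lba's already recorded for this disk (e.g. by
 * add_to_super_ddf() or loaded from existing metadata) take precedence
 * below; only if they are zero do we fall back to the defaults of 16MiB
 * (primary) and 32MiB (secondary and workspace) below the end of the
 * device.  The anchor always occupies the very last sector (size - 1).
 */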
2985 get_dev_size(fd, NULL, &size);
2986 size /= 512;
2987 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2988 ddf->anchor.workspace_lba = d->workspace_lba;
2989 else
2990 ddf->anchor.workspace_lba =
2991 cpu_to_be64(size - 32*1024*2);
2992 if (be64_to_cpu(d->primary_lba) != 0ULL)
2993 ddf->anchor.primary_lba = d->primary_lba;
2994 else
2995 ddf->anchor.primary_lba =
2996 cpu_to_be64(size - 16*1024*2);
2997 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2998 ddf->anchor.secondary_lba = d->secondary_lba;
2999 else
3000 ddf->anchor.secondary_lba =
3001 cpu_to_be64(size - 32*1024*2);
3002 ddf->anchor.seq = ddf->active->seq;
3003 memcpy(&ddf->primary, &ddf->anchor, 512);
3004 memcpy(&ddf->secondary, &ddf->anchor, 512);
3005
3006 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3007 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3008 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3009
3010 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3011 return 0;
3012
3013 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3014 return 0;
3015
3016 lseek64(fd, (size-1)*512, SEEK_SET);
3017 if (write(fd, &ddf->anchor, 512) < 0)
3018 return 0;
3019
3020 return 1;
3021 }
3022
3023 #ifndef MDASSEMBLE
3024 static int __write_init_super_ddf(struct supertype *st)
3025 {
3026 struct ddf_super *ddf = st->sb;
3027 struct dl *d;
3028 int attempts = 0;
3029 int successes = 0;
3030
3031 pr_state(ddf, __func__);
3032
3033 /* try to write updated metadata,
3034 * if we catch a failure move on to the next disk
3035 */
3036 for (d = ddf->dlist; d; d=d->next) {
3037 attempts++;
3038 successes += _write_super_to_disk(ddf, d);
3039 }
3040
3041 return attempts != successes;
3042 }
3043
3044 static int write_init_super_ddf(struct supertype *st)
3045 {
3046 struct ddf_super *ddf = st->sb;
3047 struct vcl *currentconf = ddf->currentconf;
3048
3049 /* we are done with currentconf; reset it so that st points at the container */
3050 ddf->currentconf = NULL;
3051
3052 if (st->update_tail) {
3053 /* queue the virtual_disk and vd_config as metadata updates */
3054 struct virtual_disk *vd;
3055 struct vd_config *vc;
3056 int len, tlen;
3057 unsigned int i;
3058
3059 if (!currentconf) {
3060 int len = (sizeof(struct phys_disk) +
3061 sizeof(struct phys_disk_entry));
3062
3063 /* adding a disk to the container. */
3064 if (!ddf->add_list)
3065 return 0;
3066
3067 append_metadata_update(st, ddf->add_list->mdupdate, len);
3068 ddf->add_list->mdupdate = NULL;
3069 return 0;
3070 }
3071
3072 /* Newly created VD */
3073
3074 /* First the virtual disk. We have a slightly fake header */
3075 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3076 vd = xmalloc(len);
3077 *vd = *ddf->virt;
3078 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3079 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3080 append_metadata_update(st, vd, len);
3081
3082 /* Then the vd_config */
3083 len = ddf->conf_rec_len * 512;
3084 tlen = len * currentconf->conf.sec_elmnt_count;
3085 vc = xmalloc(tlen);
3086 memcpy(vc, &currentconf->conf, len);
3087 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3088 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3089 len);
3090 append_metadata_update(st, vc, tlen);
3091
3092 /* FIXME I need to close the fds! */
3093 return 0;
3094 } else {
3095 struct dl *d;
3096 if (!currentconf)
3097 for (d = ddf->dlist; d; d=d->next)
3098 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3099 return __write_init_super_ddf(st);
3100 }
3101 }
3102
3103 #endif
3104
3105 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3106 unsigned long long data_offset)
3107 {
3108 /* We must reserve the last 32Meg */
3109 if (devsize <= 32*1024*2)
3110 return 0;
3111 return devsize - 32*1024*2;
3112 }
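/* For example (illustrative numbers): a 41943040-sector (20GiB) member
 * reports 41943040 - 32*1024*2 = 41877504 sectors available, and any
 * device of 32MiB or less reports 0 and so cannot be used.
 */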
3113
3114 #ifndef MDASSEMBLE
3115
3116 static int reserve_space(struct supertype *st, int raiddisks,
3117 unsigned long long size, int chunk,
3118 unsigned long long *freesize)
3119 {
3120 /* Find 'raiddisks' spare extents at least 'size' big (but
3121 * only caring about multiples of 'chunk') and remember
3122 * them.
3123 * If they cannot be found, fail.
3124 */
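/* Worked example (illustrative numbers): with three container members whose
 * largest free extents are 100GiB, 80GiB and 80GiB, a 2-disk array created
 * with size == 0 settles on 80GiB - the largest size available on at least
 * 'raiddisks' members - and the first-fit pass at the end assigns raiddisk
 * numbers 0 and 1 to the first two members offering at least that much.
 */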
3125 struct dl *dl;
3126 struct ddf_super *ddf = st->sb;
3127 int cnt = 0;
3128
3129 for (dl = ddf->dlist; dl ; dl=dl->next) {
3130 dl->raiddisk = -1;
3131 dl->esize = 0;
3132 }
3133 /* Now find largest extent on each device */
3134 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3135 struct extent *e = get_extents(ddf, dl);
3136 unsigned long long pos = 0;
3137 int i = 0;
3138 int found = 0;
3139 unsigned long long minsize = size;
3140
3141 if (size == 0)
3142 minsize = chunk;
3143
3144 if (!e)
3145 continue;
3146 do {
3147 unsigned long long esize;
3148 esize = e[i].start - pos;
3149 if (esize >= minsize) {
3150 found = 1;
3151 minsize = esize;
3152 }
3153 pos = e[i].start + e[i].size;
3154 i++;
3155 } while (e[i-1].size);
3156 if (found) {
3157 cnt++;
3158 dl->esize = minsize;
3159 }
3160 free(e);
3161 }
3162 if (cnt < raiddisks) {
3163 pr_err("not enough devices with space to create array.\n");
3164 return 0; /* Not enough free extents that are large enough */
3165 }
3166 if (size == 0) {
3167 /* choose the largest size of which there are at least 'raiddisks' extents */
3168 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3169 struct dl *dl2;
3170 if (dl->esize <= size)
3171 continue;
3172 /* This is bigger than 'size', see if there are enough */
3173 cnt = 0;
3174 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3175 if (dl2->esize >= dl->esize)
3176 cnt++;
3177 if (cnt >= raiddisks)
3178 size = dl->esize;
3179 }
3180 if (chunk) {
3181 size = size / chunk;
3182 size *= chunk;
3183 }
3184 *freesize = size;
3185 if (size < 32) {
3186 pr_err("not enough spare devices to create array.\n");
3187 return 0;
3188 }
3189 }
3190 /* We have a 'size' for which there are enough free extents.
3191 * We simply do a first-fit */
3192 cnt = 0;
3193 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3194 if (dl->esize < size)
3195 continue;
3196
3197 dl->raiddisk = cnt;
3198 cnt++;
3199 }
3200 return 1;
3201 }
3202
3203 static int
3204 validate_geometry_ddf_container(struct supertype *st,
3205 int level, int layout, int raiddisks,
3206 int chunk, unsigned long long size,
3207 unsigned long long data_offset,
3208 char *dev, unsigned long long *freesize,
3209 int verbose);
3210
3211 static int validate_geometry_ddf_bvd(struct supertype *st,
3212 int level, int layout, int raiddisks,
3213 int *chunk, unsigned long long size,
3214 unsigned long long data_offset,
3215 char *dev, unsigned long long *freesize,
3216 int verbose);
3217
3218 static int validate_geometry_ddf(struct supertype *st,
3219 int level, int layout, int raiddisks,
3220 int *chunk, unsigned long long size,
3221 unsigned long long data_offset,
3222 char *dev, unsigned long long *freesize,
3223 int verbose)
3224 {
3225 int fd;
3226 struct mdinfo *sra;
3227 int cfd;
3228
3229 /* ddf potentially supports lots of things, but it depends on
3230 * what devices are offered (and maybe kernel version?)
3231 * If given unused devices, we will make a container.
3232 * If given devices in a container, we will make a BVD.
3233 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3234 */
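/* Descriptive sketch of the decisions below (no new behaviour implied):
 *	level == LEVEL_CONTAINER            -> validate_geometry_ddf_container()
 *	dev == NULL                         -> check the level/layout can be
 *	                                       expressed in DDF and, if a
 *	                                       container is open, reserve space
 *	st->sb already set                  -> validate_geometry_ddf_bvd()
 *	dev is a member of a ddf container  -> load that container, then
 *	                                       validate_geometry_ddf_bvd()
 *	anything else                       -> fail: a container is required
 */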
3235
3236 if (*chunk == UnSet)
3237 *chunk = DEFAULT_CHUNK;
3238
3239 if (level == -1000000) level = LEVEL_CONTAINER;
3240 if (level == LEVEL_CONTAINER) {
3241 /* Must be a fresh device to add to a container */
3242 return validate_geometry_ddf_container(st, level, layout,
3243 raiddisks, *chunk,
3244 size, data_offset, dev,
3245 freesize,
3246 verbose);
3247 }
3248
3249 if (!dev) {
3250 mdu_array_info_t array = {
3251 .level = level, .layout = layout,
3252 .raid_disks = raiddisks
3253 };
3254 struct vd_config conf;
3255 if (layout_md2ddf(&array, &conf) == -1) {
3256 if (verbose)
3257 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3258 level, layout, raiddisks);
3259 return 0;
3260 }
3261 /* Should check layout? etc */
3262
3263 if (st->sb && freesize) {
3264 /* --create was given a container to create in.
3265 * So we need to check that there are enough
3266 * free spaces and return the amount of space.
3267 * We may as well remember which drives were
3268 * chosen so that add_to_super/getinfo_super
3269 * can return them.
3270 */
3271 return reserve_space(st, raiddisks, size, *chunk, freesize);
3272 }
3273 return 1;
3274 }
3275
3276 if (st->sb) {
3277 /* A container has already been opened, so we are
3278 * creating in there. Maybe a BVD, maybe an SVD.
3279 * Should make a distinction one day.
3280 */
3281 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3282 chunk, size, data_offset, dev,
3283 freesize,
3284 verbose);
3285 }
3286 /* This is the first device for the array.
3287 * If it is a container, we read it in and do automagic allocations,
3288 * no other devices should be given.
3289 * Otherwise it must be a member device of a container, and we
3290 * do manual allocation.
3291 * Later we should check for a BVD and make an SVD.
3292 */
3293 fd = open(dev, O_RDONLY|O_EXCL, 0);
3294 if (fd >= 0) {
3295 sra = sysfs_read(fd, NULL, GET_VERSION);
3296 close(fd);
3297 if (sra && sra->array.major_version == -1 &&
3298 strcmp(sra->text_version, "ddf") == 0) {
3299
3300 /* load super */
3301 /* find space for 'n' devices. */
3302 /* remember the devices */
3303 /* Somehow return the fact that we have enough */
3304 }
3305
3306 if (verbose)
3307 pr_err("ddf: Cannot create this array "
3308 "on device %s - a container is required.\n",
3309 dev);
3310 return 0;
3311 }
3312 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3313 if (verbose)
3314 pr_err("ddf: Cannot open %s: %s\n",
3315 dev, strerror(errno));
3316 return 0;
3317 }
3318 /* Well, it is in use by someone, maybe a 'ddf' container. */
3319 cfd = open_container(fd);
3320 if (cfd < 0) {
3321 close(fd);
3322 if (verbose)
3323 pr_err("ddf: Cannot use %s: %s\n",
3324 dev, strerror(EBUSY));
3325 return 0;
3326 }
3327 sra = sysfs_read(cfd, NULL, GET_VERSION);
3328 close(fd);
3329 if (sra && sra->array.major_version == -1 &&
3330 strcmp(sra->text_version, "ddf") == 0) {
3331 /* This is a member of a ddf container. Load the container
3332 * and try to create a bvd
3333 */
3334 struct ddf_super *ddf;
3335 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3336 st->sb = ddf;
3337 strcpy(st->container_devnm, fd2devnm(cfd));
3338 close(cfd);
3339 return validate_geometry_ddf_bvd(st, level, layout,
3340 raiddisks, chunk, size,
3341 data_offset,
3342 dev, freesize,
3343 verbose);
3344 }
3345 close(cfd);
3346 } else /* device may belong to a different container */
3347 return 0;
3348
3349 return 1;
3350 }
3351
3352 static int
3353 validate_geometry_ddf_container(struct supertype *st,
3354 int level, int layout, int raiddisks,
3355 int chunk, unsigned long long size,
3356 unsigned long long data_offset,
3357 char *dev, unsigned long long *freesize,
3358 int verbose)
3359 {
3360 int fd;
3361 unsigned long long ldsize;
3362
3363 if (level != LEVEL_CONTAINER)
3364 return 0;
3365 if (!dev)
3366 return 1;
3367
3368 fd = open(dev, O_RDONLY|O_EXCL, 0);
3369 if (fd < 0) {
3370 if (verbose)
3371 pr_err("ddf: Cannot open %s: %s\n",
3372 dev, strerror(errno));
3373 return 0;
3374 }
3375 if (!get_dev_size(fd, dev, &ldsize)) {
3376 close(fd);
3377 return 0;
3378 }
3379 close(fd);
3380
3381 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3382 if (*freesize == 0)
3383 return 0;
3384
3385 return 1;
3386 }
3387
3388 static int validate_geometry_ddf_bvd(struct supertype *st,
3389 int level, int layout, int raiddisks,
3390 int *chunk, unsigned long long size,
3391 unsigned long long data_offset,
3392 char *dev, unsigned long long *freesize,
3393 int verbose)
3394 {
3395 struct stat stb;
3396 struct ddf_super *ddf = st->sb;
3397 struct dl *dl;
3398 unsigned long long pos = 0;
3399 unsigned long long maxsize;
3400 struct extent *e;
3401 int i;
3402 /* ddf/bvd supports lots of things, but not containers */
3403 if (level == LEVEL_CONTAINER) {
3404 if (verbose)
3405 pr_err("DDF cannot create a container within an container\n");
3406 return 0;
3407 }
3408 /* We must have the container info already read in. */
3409 if (!ddf)
3410 return 0;
3411
3412 if (!dev) {
3413 /* General test: make sure there is space for
3414 * 'raiddisks' device extents of size 'size'.
3415 */
3416 unsigned long long minsize = size;
3417 int dcnt = 0;
3418 if (minsize == 0)
3419 minsize = 8;
3420 for (dl = ddf->dlist; dl ; dl = dl->next)
3421 {
3422 int found = 0;
3423 pos = 0;
3424
3425 i = 0;
3426 e = get_extents(ddf, dl);
3427 if (!e) continue;
3428 do {
3429 unsigned long long esize;
3430 esize = e[i].start - pos;
3431 if (esize >= minsize)
3432 found = 1;
3433 pos = e[i].start + e[i].size;
3434 i++;
3435 } while (e[i-1].size);
3436 if (found)
3437 dcnt++;
3438 free(e);
3439 }
3440 if (dcnt < raiddisks) {
3441 if (verbose)
3442 pr_err("ddf: Not enough devices with "
3443 "space for this array (%d < %d)\n",
3444 dcnt, raiddisks);
3445 return 0;
3446 }
3447 return 1;
3448 }
3449 /* This device must be a member of the set */
3450 if (stat(dev, &stb) < 0)
3451 return 0;
3452 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3453 return 0;
3454 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3455 if (dl->major == (int)major(stb.st_rdev) &&
3456 dl->minor == (int)minor(stb.st_rdev))
3457 break;
3458 }
3459 if (!dl) {
3460 if (verbose)
3461 pr_err("ddf: %s is not in the "
3462 "same DDF set\n",
3463 dev);
3464 return 0;
3465 }
3466 e = get_extents(ddf, dl);
3467 maxsize = 0;
3468 i = 0;
3469 if (e) do {
3470 unsigned long long esize;
3471 esize = e[i].start - pos;
3472 if (esize >= maxsize)
3473 maxsize = esize;
3474 pos = e[i].start + e[i].size;
3475 i++;
3476 } while (e[i-1].size);
3477 *freesize = maxsize;
3478 // FIXME here I am
3479
3480 return 1;
3481 }
3482
3483 static int load_super_ddf_all(struct supertype *st, int fd,
3484 void **sbp, char *devname)
3485 {
3486 struct mdinfo *sra;
3487 struct ddf_super *super;
3488 struct mdinfo *sd, *best = NULL;
3489 int bestseq = 0;
3490 int seq;
3491 char nm[20];
3492 int dfd;
3493
3494 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3495 if (!sra)
3496 return 1;
3497 if (sra->array.major_version != -1 ||
3498 sra->array.minor_version != -2 ||
3499 strcmp(sra->text_version, "ddf") != 0)
3500 return 1;
3501
3502 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3503 return 1;
3504 memset(super, 0, sizeof(*super));
3505
3506 /* first, try each device, and choose the best ddf */
3507 for (sd = sra->devs ; sd ; sd = sd->next) {
3508 int rv;
3509 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3510 dfd = dev_open(nm, O_RDONLY);
3511 if (dfd < 0)
3512 return 2;
3513 rv = load_ddf_headers(dfd, super, NULL);
3514 close(dfd);
3515 if (rv == 0) {
3516 seq = be32_to_cpu(super->active->seq);
3517 if (super->active->openflag)
3518 seq--;
3519 if (!best || seq > bestseq) {
3520 bestseq = seq;
3521 best = sd;
3522 }
3523 }
3524 }
3525 if (!best)
3526 return 1;
3527 /* OK, load this ddf */
3528 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3529 dfd = dev_open(nm, O_RDONLY);
3530 if (dfd < 0)
3531 return 1;
3532 load_ddf_headers(dfd, super, NULL);
3533 load_ddf_global(dfd, super, NULL);
3534 close(dfd);
3535 /* Now we need the device-local bits */
3536 for (sd = sra->devs ; sd ; sd = sd->next) {
3537 int rv;
3538
3539 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3540 dfd = dev_open(nm, O_RDWR);
3541 if (dfd < 0)
3542 return 2;
3543 rv = load_ddf_headers(dfd, super, NULL);
3544 if (rv == 0)
3545 rv = load_ddf_local(dfd, super, NULL, 1);
3546 if (rv)
3547 return 1;
3548 }
3549
3550 *sbp = super;
3551 if (st->ss == NULL) {
3552 st->ss = &super_ddf;
3553 st->minor_version = 0;
3554 st->max_devs = 512;
3555 }
3556 strcpy(st->container_devnm, fd2devnm(fd));
3557 return 0;
3558 }
3559
3560 static int load_container_ddf(struct supertype *st, int fd,
3561 char *devname)
3562 {
3563 return load_super_ddf_all(st, fd, &st->sb, devname);
3564 }
3565
3566 #endif /* MDASSEMBLE */
3567
3568 static int check_secondary(const struct vcl *vc)
3569 {
3570 const struct vd_config *conf = &vc->conf;
3571 int i;
3572
3573 /* The only DDF secondary RAID level md can support is
3574 * RAID 10, if the stripe sizes and Basic volume sizes
3575 * are all equal.
3576 * Other configurations could in theory be supported by exposing
3577 * the BVDs to user space and using device mapper for the secondary
3578 * mapping. So far we don't support that.
3579 */
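/* Example of a configuration this accepts (illustrative): a DDF RAID10
 * built from mirrored BVDs, i.e. prl == DDF_RAID1 on every BVD,
 * srl == DDF_2STRIPED or DDF_2SPANNED, and identical prim_elmnt_count,
 * chunk_shift and array_blocks across all sec_elmnt_count BVDs - exactly
 * the checks coded below.
 */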
3580
3581 __u64 sec_elements[4] = {0, 0, 0, 0};
3582 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3583 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
3584
3585 if (vc->other_bvds == NULL) {
3586 pr_err("No BVDs for secondary RAID found\n");
3587 return -1;
3588 }
3589 if (conf->prl != DDF_RAID1) {
3590 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3591 return -1;
3592 }
3593 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3594 pr_err("Secondary RAID level %d is unsupported\n",
3595 conf->srl);
3596 return -1;
3597 }
3598 __set_sec_seen(conf->sec_elmnt_seq);
3599 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3600 const struct vd_config *bvd = vc->other_bvds[i];
3601 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3602 continue;
3603 if (bvd->srl != conf->srl) {
3604 pr_err("Inconsistent secondary RAID level across BVDs\n");
3605 return -1;
3606 }
3607 if (bvd->prl != conf->prl) {
3608 pr_err("Different RAID levels for BVDs are unsupported\n");
3609 return -1;
3610 }
3611 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3612 pr_err("All BVDs must have the same number of primary elements\n");
3613 return -1;
3614 }
3615 if (bvd->chunk_shift != conf->chunk_shift) {
3616 pr_err("Different strip sizes for BVDs are unsupported\n");
3617 return -1;
3618 }
3619 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3620 pr_err("Different BVD sizes are unsupported\n");
3621 return -1;
3622 }
3623 __set_sec_seen(bvd->sec_elmnt_seq);
3624 }
3625 for (i = 0; i < conf->sec_elmnt_count; i++) {
3626 if (!__was_sec_seen(i)) {
3627 pr_err("BVD %d is missing\n", i);
3628 return -1;
3629 }
3630 }
3631 return 0;
3632 }
3633
3634 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3635 be32 refnum, unsigned int nmax,
3636 const struct vd_config **bvd,
3637 unsigned int *idx)
3638 {
3639 unsigned int i, j, n, sec, cnt;
3640
3641 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3642 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3643
3644 for (i = 0, j = 0 ; i < nmax ; i++) {
3645 /* j counts valid entries for this BVD */
3646 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3647 j++;
3648 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3649 *bvd = &vc->conf;
3650 *idx = i;
3651 return sec * cnt + j - 1;
3652 }
3653 }
3654 if (vc->other_bvds == NULL)
3655 goto bad;
3656
3657 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3658 struct vd_config *vd = vc->other_bvds[n-1];
3659 sec = vd->sec_elmnt_seq;
3660 if (sec == DDF_UNUSED_BVD)
3661 continue;
3662 for (i = 0, j = 0 ; i < nmax ; i++) {
3663 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3664 j++;
3665 if (be32_eq(vd->phys_refnum[i], refnum)) {
3666 *bvd = vd;
3667 *idx = i;
3668 return sec * cnt + j - 1;
3669 }
3670 }
3671 }
3672 bad:
3673 *bvd = NULL;
3674 return DDF_NOTFOUND;
3675 }
3676
3677 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3678 {
3679 /* Given a container loaded by load_super_ddf_all,
3680 * extract information about all the arrays into
3681 * an mdinfo tree.
3682 *
3683 * For each vcl in conflist: create an mdinfo, fill it in,
3684 * then look for matching devices (phys_refnum) in dlist
3685 * and create appropriate device mdinfo.
3686 */
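/* Shape of the returned tree (descriptive only):
 *
 *	rest -> mdinfo for one virtual disk
 *	          ->devs  -> mdinfo per online member disk found in dlist,
 *	                     with data_offset/component_size taken from the
 *	                     BVD that references it
 *	          ->next  -> mdinfo for the next virtual disk, ...
 */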
3687 struct ddf_super *ddf = st->sb;
3688 struct mdinfo *rest = NULL;
3689 struct vcl *vc;
3690
3691 for (vc = ddf->conflist ; vc ; vc=vc->next)
3692 {
3693 unsigned int i;
3694 unsigned int j;
3695 struct mdinfo *this;
3696 char *ep;
3697 __u32 *cptr;
3698 unsigned int pd;
3699
3700 if (subarray &&
3701 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3702 *ep != '\0'))
3703 continue;
3704
3705 if (vc->conf.sec_elmnt_count > 1) {
3706 if (check_secondary(vc) != 0)
3707 continue;
3708 }
3709
3710 this = xcalloc(1, sizeof(*this));
3711 this->next = rest;
3712 rest = this;
3713
3714 if (layout_ddf2md(&vc->conf, &this->array))
3715 continue;
3716 this->array.md_minor = -1;
3717 this->array.major_version = -1;
3718 this->array.minor_version = -2;
3719 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3720 cptr = (__u32 *)(vc->conf.guid + 16);
3721 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3722 this->array.utime = DECADE +
3723 be32_to_cpu(vc->conf.timestamp);
3724 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3725
3726 i = vc->vcnum;
3727 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3728 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3729 DDF_init_full) {
3730 this->array.state = 0;
3731 this->resync_start = 0;
3732 } else {
3733 this->array.state = 1;
3734 this->resync_start = MaxSector;
3735 }
3736 memcpy(this->name, ddf->virt->entries[i].name, 16);
3737 this->name[16]=0;
3738 for(j=0; j<16; j++)
3739 if (this->name[j] == ' ')
3740 this->name[j] = 0;
3741
3742 memset(this->uuid, 0, sizeof(this->uuid));
3743 this->component_size = be64_to_cpu(vc->conf.blocks);
3744 this->array.size = this->component_size / 2;
3745 this->container_member = i;
3746
3747 ddf->currentconf = vc;
3748 uuid_from_super_ddf(st, this->uuid);
3749 if (!subarray)
3750 ddf->currentconf = NULL;
3751
3752 sprintf(this->text_version, "/%s/%d",
3753 st->container_devnm, this->container_member);
3754
3755 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3756 struct mdinfo *dev;
3757 struct dl *d;
3758 const struct vd_config *bvd;
3759 unsigned int iphys;
3760 int stt;
3761
3762 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3763 == 0xFFFFFFFF)
3764 continue;
3765
3766 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3767 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3768 != DDF_Online)
3769 continue;
3770
3771 i = get_pd_index_from_refnum(
3772 vc, ddf->phys->entries[pd].refnum,
3773 ddf->mppe, &bvd, &iphys);
3774 if (i == DDF_NOTFOUND)
3775 continue;
3776
3777 this->array.working_disks++;
3778
3779 for (d = ddf->dlist; d ; d=d->next)
3780 if (be32_eq(d->disk.refnum,
3781 ddf->phys->entries[pd].refnum))
3782 break;
3783 if (d == NULL)
3784 /* Haven't found that one yet, maybe there are others */
3785 continue;
3786
3787 dev = xcalloc(1, sizeof(*dev));
3788 dev->next = this->devs;
3789 this->devs = dev;
3790
3791 dev->disk.number = be32_to_cpu(d->disk.refnum);
3792 dev->disk.major = d->major;
3793 dev->disk.minor = d->minor;
3794 dev->disk.raid_disk = i;
3795 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3796 dev->recovery_start = MaxSector;
3797
3798 dev->events = be32_to_cpu(ddf->primary.seq);
3799 dev->data_offset =
3800 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3801 dev->component_size = be64_to_cpu(bvd->blocks);
3802 if (d->devname)
3803 strcpy(dev->name, d->devname);
3804 }
3805 }
3806 return rest;
3807 }
3808
3809 static int store_super_ddf(struct supertype *st, int fd)
3810 {
3811 struct ddf_super *ddf = st->sb;
3812 unsigned long long dsize;
3813 void *buf;
3814 int rc;
3815
3816 if (!ddf)
3817 return 1;
3818
3819 if (!get_dev_size(fd, NULL, &dsize))
3820 return 1;
3821
3822 if (ddf->dlist || ddf->conflist) {
3823 struct stat sta;
3824 struct dl *dl;
3825 int ofd, ret;
3826
3827 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3828 pr_err("%s: file descriptor for invalid device\n",
3829 __func__);
3830 return 1;
3831 }
3832 for (dl = ddf->dlist; dl; dl = dl->next)
3833 if (dl->major == (int)major(sta.st_rdev) &&
3834 dl->minor == (int)minor(sta.st_rdev))
3835 break;
3836 if (!dl) {
3837 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3838 (int)major(sta.st_rdev),
3839 (int)minor(sta.st_rdev));
3840 return 1;
3841 }
3842 ofd = dl->fd;
3843 dl->fd = fd;
3844 ret = (_write_super_to_disk(ddf, dl) != 1);
3845 dl->fd = ofd;
3846 return ret;
3847 }
3848
3849 if (posix_memalign(&buf, 512, 512) != 0)
3850 return 1;
3851 memset(buf, 0, 512);
3852
3853 lseek64(fd, dsize-512, 0);
3854 rc = write(fd, buf, 512);
3855 free(buf);
3856 if (rc < 0)
3857 return 1;
3858 return 0;
3859 }
3860
3861 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3862 {
3863 /*
3864 * return:
3865 * 0 same, or first was empty, and second was copied
3866 * 1 second had wrong number
3867 * 2 wrong uuid
3868 * 3 wrong other info
3869 */
3870 struct ddf_super *first = st->sb;
3871 struct ddf_super *second = tst->sb;
3872 struct dl *dl1, *dl2;
3873 struct vcl *vl1, *vl2;
3874 unsigned int max_vds, max_pds, pd, vd;
3875
3876 if (!first) {
3877 st->sb = tst->sb;
3878 tst->sb = NULL;
3879 return 0;
3880 }
3881
3882 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3883 return 2;
3884
3885 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3886 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3887 be32_to_cpu(first->anchor.seq),
3888 be32_to_cpu(second->anchor.seq));
3889 return 3;
3890 }
3891 if (first->max_part != second->max_part ||
3892 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3893 !be16_eq(first->virt->populated_vdes,
3894 second->virt->populated_vdes)) {
3895 dprintf("%s: PD/VD number mismatch\n", __func__);
3896 return 3;
3897 }
3898
3899 max_pds = be16_to_cpu(first->phys->used_pdes);
3900 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3901 for (pd = 0; pd < max_pds; pd++)
3902 if (be32_eq(first->phys->entries[pd].refnum,
3903 dl2->disk.refnum))
3904 break;
3905 if (pd == max_pds) {
3906 dprintf("%s: no match for disk %08x\n", __func__,
3907 be32_to_cpu(dl2->disk.refnum));
3908 return 3;
3909 }
3910 }
3911
3912 max_vds = be16_to_cpu(first->active->max_vd_entries);
3913 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3914 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3915 continue;
3916 for (vd = 0; vd < max_vds; vd++)
3917 if (!memcmp(first->virt->entries[vd].guid,
3918 vl2->conf.guid, DDF_GUID_LEN))
3919 break;
3920 if (vd == max_vds) {
3921 dprintf("%s: no match for VD config\n", __func__);
3922 return 3;
3923 }
3924 }
3925 /* FIXME should I look at anything else? */
3926
3927 /*
3928 * At this point we are fairly sure that the metadata matches.
3929 * But the new disk may contain additional local data.
3930 * Add it to the super block.
3931 */
3932 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3933 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3934 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3935 DDF_GUID_LEN))
3936 break;
3937 if (vl1) {
3938 if (vl1->other_bvds != NULL &&
3939 vl1->conf.sec_elmnt_seq !=
3940 vl2->conf.sec_elmnt_seq) {
3941 dprintf("%s: adding BVD %u\n", __func__,
3942 vl2->conf.sec_elmnt_seq);
3943 add_other_bvd(vl1, &vl2->conf,
3944 first->conf_rec_len*512);
3945 }
3946 continue;
3947 }
3948
3949 if (posix_memalign((void **)&vl1, 512,
3950 (first->conf_rec_len*512 +
3951 offsetof(struct vcl, conf))) != 0) {
3952 pr_err("%s could not allocate vcl buf\n",
3953 __func__);
3954 return 3;
3955 }
3956
3957 vl1->next = first->conflist;
3958 vl1->block_sizes = NULL;
3959 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3960 if (alloc_other_bvds(first, vl1) != 0) {
3961 pr_err("%s could not allocate other bvds\n",
3962 __func__);
3963 free(vl1);
3964 return 3;
3965 }
3966 for (vd = 0; vd < max_vds; vd++)
3967 if (!memcmp(first->virt->entries[vd].guid,
3968 vl1->conf.guid, DDF_GUID_LEN))
3969 break;
3970 vl1->vcnum = vd;
3971 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3972 first->conflist = vl1;
3973 }
3974
3975 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3976 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3977 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3978 break;
3979 if (dl1)
3980 continue;
3981
3982 if (posix_memalign((void **)&dl1, 512,
3983 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3984 != 0) {
3985 pr_err("%s could not allocate disk info buffer\n",
3986 __func__);
3987 return 3;
3988 }
3989 memcpy(dl1, dl2, sizeof(*dl1));
3990 dl1->mdupdate = NULL;
3991 dl1->next = first->dlist;
3992 dl1->fd = -1;
3993 for (pd = 0; pd < max_pds; pd++)
3994 if (be32_eq(first->phys->entries[pd].refnum,
3995 dl1->disk.refnum))
3996 break;
3997 dl1->pdnum = pd;
3998 if (dl2->spare) {
3999 if (posix_memalign((void **)&dl1->spare, 512,
4000 first->conf_rec_len*512) != 0) {
4001 pr_err("%s could not allocate spare info buf\n",
4002 __func__);
4003 return 3;
4004 }
4005 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4006 }
4007 for (vd = 0 ; vd < first->max_part ; vd++) {
4008 if (!dl2->vlist[vd]) {
4009 dl1->vlist[vd] = NULL;
4010 continue;
4011 }
4012 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4013 if (!memcmp(vl1->conf.guid,
4014 dl2->vlist[vd]->conf.guid,
4015 DDF_GUID_LEN))
4016 break;
4017 }
4018 dl1->vlist[vd] = vl1;
4019 }
4020 first->dlist = dl1;
4021 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4022 be32_to_cpu(dl1->disk.refnum));
4023 }
4024
4025 return 0;
4026 }
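/* Illustrative sketch only: how a caller could act on the return codes
 * documented at the top of compare_super_ddf(). 'existing' and 'fresh' are
 * hypothetical supertypes for a container being assembled; the helper name
 * is an assumption for illustration.
 */
static int __attribute__((unused))
example_merge_ddf_super(struct supertype *existing, struct supertype *fresh)
{
	switch (compare_super_ddf(existing, fresh)) {
	case 0:
		return 0;	/* same container (or first was empty) */
	case 2:
		return -1;	/* anchor GUID mismatch: different container */
	default:
		return -1;	/* sequence number or PD/VD table mismatch */
	}
}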
4027
4028 #ifndef MDASSEMBLE
4029 /*
4030 * A new array 'a' has been started which claims to be instance 'inst'
4031 * within container 'c'.
4032 * We need to confirm that the array matches the metadata in 'c' so
4033 * that we don't corrupt any metadata.
4034 */
4035 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4036 {
4037 struct ddf_super *ddf = c->sb;
4038 int n = atoi(inst);
4039 struct mdinfo *dev;
4040 struct dl *dl;
4041 static const char faulty[] = "faulty";
4042
4043 if (all_ff(ddf->virt->entries[n].guid)) {
4044 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4045 return -ENODEV;
4046 }
4047 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4048 guid_str(ddf->virt->entries[n].guid));
4049 for (dev = a->info.devs; dev; dev = dev->next) {
4050 for (dl = ddf->dlist; dl; dl = dl->next)
4051 if (dl->major == dev->disk.major &&
4052 dl->minor == dev->disk.minor)
4053 break;
4054 if (!dl) {
4055 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4056 __func__, dev->disk.major, dev->disk.minor, n);
4057 return -1;
4058 }
4059 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4060 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4061 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4062 __func__, n, dl->major, dl->minor,
4063 be16_to_cpu(
4064 ddf->phys->entries[dl->pdnum].state));
4065 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4066 sizeof(faulty) - 1)
4067 pr_err("Write to state_fd failed\n");
4068 dev->curr_state = DS_FAULTY;
4069 }
4070 }
4071 a->info.container_member = n;
4072 return 0;
4073 }
4074
4075 /*
4076 * The array 'a' is to be marked clean in the metadata.
4077 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4078 * clean up to that sector offset. If that cannot be recorded in the
4079 * metadata, then leave it as dirty.
4080 *
4081 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4082 * !global! virtual_disk.virtual_entry structure.
4083 */
4084 static int ddf_set_array_state(struct active_array *a, int consistent)
4085 {
4086 struct ddf_super *ddf = a->container->sb;
4087 int inst = a->info.container_member;
4088 int old = ddf->virt->entries[inst].state;
4089 if (consistent == 2) {
4090 /* Should check if a recovery should be started FIXME */
4091 consistent = 1;
4092 if (!is_resync_complete(&a->info))
4093 consistent = 0;
4094 }
4095 if (consistent)
4096 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4097 else
4098 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4099 if (old != ddf->virt->entries[inst].state)
4100 ddf_set_updates_pending(ddf);
4101
4102 old = ddf->virt->entries[inst].init_state;
4103 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4104 if (is_resync_complete(&a->info))
4105 ddf->virt->entries[inst].init_state |= DDF_init_full;
4106 else if (a->info.resync_start == 0)
4107 ddf->virt->entries[inst].init_state |= DDF_init_not;
4108 else
4109 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4110 if (old != ddf->virt->entries[inst].init_state)
4111 ddf_set_updates_pending(ddf);
4112
4113 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4114 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4115 consistent?"clean":"dirty",
4116 a->info.resync_start);
4117 return consistent;
4118 }
4119
4120 static int get_bvd_state(const struct ddf_super *ddf,
4121 const struct vd_config *vc)
4122 {
4123 unsigned int i, n_bvd, working = 0;
4124 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4125 int pd, st, state;
4126 for (i = 0; i < n_prim; i++) {
4127 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4128 continue;
4129 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4130 if (pd < 0)
4131 continue;
4132 st = be16_to_cpu(ddf->phys->entries[pd].state);
4133 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4134 == DDF_Online)
4135 working++;
4136 }
4137
4138 state = DDF_state_degraded;
4139 if (working == n_prim)
4140 state = DDF_state_optimal;
4141 else
4142 switch (vc->prl) {
4143 case DDF_RAID0:
4144 case DDF_CONCAT:
4145 case DDF_JBOD:
4146 state = DDF_state_failed;
4147 break;
4148 case DDF_RAID1:
4149 if (working == 0)
4150 state = DDF_state_failed;
4151 else if (working >= 2)
4152 state = DDF_state_part_optimal;
4153 break;
4154 case DDF_RAID4:
4155 case DDF_RAID5:
4156 if (working < n_prim - 1)
4157 state = DDF_state_failed;
4158 break;
4159 case DDF_RAID6:
4160 if (working < n_prim - 2)
4161 state = DDF_state_failed;
4162 else if (working == n_prim - 1)
4163 state = DDF_state_part_optimal;
4164 break;
4165 }
4166 return state;
4167 }
4168
4169 static int secondary_state(int state, int other, int seclevel)
4170 {
4171 if (state == DDF_state_optimal && other == DDF_state_optimal)
4172 return DDF_state_optimal;
4173 if (seclevel == DDF_2MIRRORED) {
4174 if (state == DDF_state_optimal || other == DDF_state_optimal)
4175 return DDF_state_part_optimal;
4176 if (state == DDF_state_failed && other == DDF_state_failed)
4177 return DDF_state_failed;
4178 return DDF_state_degraded;
4179 } else {
4180 if (state == DDF_state_failed || other == DDF_state_failed)
4181 return DDF_state_failed;
4182 if (state == DDF_state_degraded || other == DDF_state_degraded)
4183 return DDF_state_degraded;
4184 return DDF_state_part_optimal;
4185 }
4186 }
4187
4188 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4189 {
4190 int state = get_bvd_state(ddf, &vcl->conf);
4191 unsigned int i;
4192 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4193 state = secondary_state(
4194 state,
4195 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4196 vcl->conf.srl);
4197 }
4198 return state;
4199 }
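/* Illustrative example of how get_svd_state() combines per-BVD states via
 * secondary_state(). For a two-way mirrored SVD (srl == DDF_2MIRRORED) one
 * optimal leg and one failed leg still leave the volume usable. The helper
 * name is an assumption for illustration only.
 */
static int __attribute__((unused))
example_mirrored_svd_state(void)
{
	/* Expected result: DDF_state_part_optimal, not DDF_state_failed. */
	return secondary_state(DDF_state_optimal, DDF_state_failed,
			       DDF_2MIRRORED);
}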
4200
4201 /*
4202 * The state of each disk is stored in the global phys_disk structure
4203 * in phys_disk.entries[n].state.
4204 * This makes various combinations awkward.
4205 * - When a device fails in any array, it must be failed in all arrays
4206 * that include a part of this device.
4207 * - When a component is rebuilding, we cannot include it officially in the
4208 * array unless this is the only array that uses the device.
4209 *
4210 * So: when transitioning:
4211 * Online -> Failed: just set the Failed flag; the monitor will propagate it.
4212 * Spare -> Online: the device might need to be added to the array.
4213 * Spare -> Failed: just set Failed. Don't worry whether it is in the array or not.
4214 */
4215 static void ddf_set_disk(struct active_array *a, int n, int state)
4216 {
4217 struct ddf_super *ddf = a->container->sb;
4218 unsigned int inst = a->info.container_member, n_bvd;
4219 struct vcl *vcl;
4220 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4221 &n_bvd, &vcl);
4222 int pd;
4223 struct mdinfo *mdi;
4224 struct dl *dl;
4225
4226 dprintf("%s: %d to %x\n", __func__, n, state);
4227 if (vc == NULL) {
4228 dprintf("ddf: cannot find instance %d!!\n", inst);
4229 return;
4230 }
4231 /* Find the matching slot in 'info'. */
4232 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4233 if (mdi->disk.raid_disk == n)
4234 break;
4235 if (!mdi) {
4236 pr_err("%s: cannot find raid disk %d\n",
4237 __func__, n);
4238 return;
4239 }
4240
4241 /* and find the 'dl' entry corresponding to that. */
4242 for (dl = ddf->dlist; dl; dl = dl->next)
4243 if (mdi->state_fd >= 0 &&
4244 mdi->disk.major == dl->major &&
4245 mdi->disk.minor == dl->minor)
4246 break;
4247 if (!dl) {
4248 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4249 __func__, n,
4250 mdi->disk.major, mdi->disk.minor);
4251 return;
4252 }
4253
4254 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4255 if (pd < 0 || pd != dl->pdnum) {
4256 /* disk doesn't currently exist or has changed.
4257 * If it is now in_sync, insert it. */
4258 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4259 __func__, dl->pdnum, dl->major, dl->minor,
4260 be32_to_cpu(dl->disk.refnum));
4261 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4262 __func__, inst, n_bvd,
4263 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4264 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4265 pd = dl->pdnum; /* FIXME: is this really correct ? */
4266 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4267 LBA_OFFSET(ddf, vc)[n_bvd] =
4268 cpu_to_be64(mdi->data_offset);
4269 be16_clear(ddf->phys->entries[pd].type,
4270 cpu_to_be16(DDF_Global_Spare));
4271 be16_set(ddf->phys->entries[pd].type,
4272 cpu_to_be16(DDF_Active_in_VD));
4273 ddf_set_updates_pending(ddf);
4274 }
4275 } else {
4276 be16 old = ddf->phys->entries[pd].state;
4277 if (state & DS_FAULTY)
4278 be16_set(ddf->phys->entries[pd].state,
4279 cpu_to_be16(DDF_Failed));
4280 if (state & DS_INSYNC) {
4281 be16_set(ddf->phys->entries[pd].state,
4282 cpu_to_be16(DDF_Online));
4283 be16_clear(ddf->phys->entries[pd].state,
4284 cpu_to_be16(DDF_Rebuilding));
4285 }
4286 if (!be16_eq(old, ddf->phys->entries[pd].state))
4287 ddf_set_updates_pending(ddf);
4288 }
4289
4290 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4291 be32_to_cpu(dl->disk.refnum), state,
4292 be16_to_cpu(ddf->phys->entries[pd].state));
4293
4294 /* Now we need to check the state of the array and update
4295 * virtual_disk.entries[n].state.
4296 * It needs to be one of "optimal", "degraded", "failed".
4297 * I don't understand 'deleted' or 'missing'.
4298 */
4299 state = get_svd_state(ddf, vcl);
4300
4301 if (ddf->virt->entries[inst].state !=
4302 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4303 | state)) {
4304
4305 ddf->virt->entries[inst].state =
4306 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4307 | state;
4308 ddf_set_updates_pending(ddf);
4309 }
4310
4311 }
4312
4313 static void ddf_sync_metadata(struct supertype *st)
4314 {
4315
4316 /*
4317 * Write all data to all devices.
4318 * Later, we might be able to track whether only local changes
4319 * have been made, or whether any global data has been changed,
4320 * but ddf is sufficiently weird that it probably always
4321 * changes global data ....
4322 */
4323 struct ddf_super *ddf = st->sb;
4324 if (!ddf->updates_pending)
4325 return;
4326 ddf->updates_pending = 0;
4327 __write_init_super_ddf(st);
4328 dprintf("ddf: sync_metadata\n");
4329 }
4330
4331 static int del_from_conflist(struct vcl **list, const char *guid)
4332 {
4333 struct vcl **p;
4334 int found = 0;
4335 for (p = list; p && *p; p = &((*p)->next))
4336 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4337 found = 1;
4338 *p = (*p)->next;
4339 }
4340 return found;
4341 }
4342
4343 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4344 {
4345 struct dl *dl;
4346 unsigned int vdnum, i;
4347 vdnum = find_vde_by_guid(ddf, guid);
4348 if (vdnum == DDF_NOTFOUND) {
4349 pr_err("%s: could not find VD %s\n", __func__,
4350 guid_str(guid));
4351 return -1;
4352 }
4353 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4354 pr_err("%s: could not find conf %s\n", __func__,
4355 guid_str(guid));
4356 return -1;
4357 }
4358 for (dl = ddf->dlist; dl; dl = dl->next)
4359 for (i = 0; i < ddf->max_part; i++)
4360 if (dl->vlist[i] != NULL &&
4361 !memcmp(dl->vlist[i]->conf.guid, guid,
4362 DDF_GUID_LEN))
4363 dl->vlist[i] = NULL;
4364 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4365 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4366 return 0;
4367 }
4368
4369 static int kill_subarray_ddf(struct supertype *st)
4370 {
4371 struct ddf_super *ddf = st->sb;
4372 /*
4373 * currentconf is set in container_content_ddf,
4374 * called with subarray arg
4375 */
4376 struct vcl *victim = ddf->currentconf;
4377 struct vd_config *conf;
4378 unsigned int vdnum;
4379 ddf->currentconf = NULL;
4380 if (!victim) {
4381 pr_err("%s: nothing to kill\n", __func__);
4382 return -1;
4383 }
4384 conf = &victim->conf;
4385 vdnum = find_vde_by_guid(ddf, conf->guid);
4386 if (vdnum == DDF_NOTFOUND) {
4387 pr_err("%s: could not find VD %s\n", __func__,
4388 guid_str(conf->guid));
4389 return -1;
4390 }
4391 if (st->update_tail) {
4392 struct virtual_disk *vd;
4393 int len = sizeof(struct virtual_disk)
4394 + sizeof(struct virtual_entry);
4395 vd = xmalloc(len);
4396 if (vd == NULL) {
4397 pr_err("%s: failed to allocate %d bytes\n", __func__,
4398 len);
4399 return -1;
4400 }
4401 memset(vd, 0 , len);
4402 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4403 vd->populated_vdes = cpu_to_be16(0);
4404 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4405 /* we use DDF_state_deleted as marker */
4406 vd->entries[0].state = DDF_state_deleted;
4407 append_metadata_update(st, vd, len);
4408 } else {
4409 _kill_subarray_ddf(ddf, conf->guid);
4410 ddf_set_updates_pending(ddf);
4411 ddf_sync_metadata(st);
4412 }
4413 return 0;
4414 }
4415
4416 static void copy_matching_bvd(struct ddf_super *ddf,
4417 struct vd_config *conf,
4418 const struct metadata_update *update)
4419 {
4420 unsigned int mppe =
4421 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4422 unsigned int len = ddf->conf_rec_len * 512;
4423 char *p;
4424 struct vd_config *vc;
4425 for (p = update->buf; p < update->buf + update->len; p += len) {
4426 vc = (struct vd_config *) p;
4427 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4428 memcpy(conf->phys_refnum, vc->phys_refnum,
4429 mppe * (sizeof(__u32) + sizeof(__u64)));
4430 return;
4431 }
4432 }
4433 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4434 conf->sec_elmnt_seq, guid_str(conf->guid));
4435 }
4436
4437 static void ddf_process_update(struct supertype *st,
4438 struct metadata_update *update)
4439 {
4440 /* Apply this update to the metadata.
4441 * The first 4 bytes are a DDF_*_MAGIC which guides
4442 * our actions.
4443 * Possible updates are:
4444 * DDF_PHYS_RECORDS_MAGIC
4445 * Add a new physical device or remove an old one.
4446 * Changes to this record only happen implicitly.
4447 * used_pdes is the device number.
4448 * DDF_VIRT_RECORDS_MAGIC
4449 * Add a new VD. Possibly also change the 'access' bits.
4450 * populated_vdes is the entry number.
4451 * DDF_VD_CONF_MAGIC
4452 * New or updated VD. The VIRT_RECORD must already
4453 * exist. For an update, phys_refnum and lba_offset
4454 * (at least) are updated, and the VD_CONF must
4455 * be written to precisely those devices listed with
4456 * a phys_refnum.
4457 * DDF_SPARE_ASSIGN_MAGIC
4458 * replacement Spare Assignment Record... but for which device?
4459 *
4460 * So, e.g.:
4461 * - to create a new array, we send a VIRT_RECORD and
4462 * a VD_CONF. Then assemble and start the array.
4463 * - to activate a spare we send a VD_CONF to add the phys_refnum
4464 * and offset. This will also mark the spare as active with
4465 * a spare-assignment record.
4466 */
4467 struct ddf_super *ddf = st->sb;
4468 be32 *magic = (be32 *)update->buf;
4469 struct phys_disk *pd;
4470 struct virtual_disk *vd;
4471 struct vd_config *vc;
4472 struct vcl *vcl;
4473 struct dl *dl;
4474 unsigned int ent;
4475 unsigned int pdnum, pd2, len;
4476
4477 dprintf("Process update %x\n", be32_to_cpu(*magic));
4478
4479 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4480
4481 if (update->len != (sizeof(struct phys_disk) +
4482 sizeof(struct phys_disk_entry)))
4483 return;
4484 pd = (struct phys_disk*)update->buf;
4485
4486 ent = be16_to_cpu(pd->used_pdes);
4487 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4488 return;
4489 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4490 struct dl **dlp;
4491 /* removing this disk. */
4492 be16_set(ddf->phys->entries[ent].state,
4493 cpu_to_be16(DDF_Missing));
4494 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4495 struct dl *dl = *dlp;
4496 if (dl->pdnum == (signed)ent) {
4497 close(dl->fd);
4498 dl->fd = -1;
4499 /* FIXME this doesn't free
4500 * dl->devname */
4501 update->space = dl;
4502 *dlp = dl->next;
4503 break;
4504 }
4505 }
4506 ddf_set_updates_pending(ddf);
4507 return;
4508 }
4509 if (!all_ff(ddf->phys->entries[ent].guid))
4510 return;
4511 ddf->phys->entries[ent] = pd->entries[0];
4512 ddf->phys->used_pdes = cpu_to_be16
4513 (1 + be16_to_cpu(ddf->phys->used_pdes));
4514 ddf_set_updates_pending(ddf);
4515 if (ddf->add_list) {
4516 struct active_array *a;
4517 struct dl *al = ddf->add_list;
4518 ddf->add_list = al->next;
4519
4520 al->next = ddf->dlist;
4521 ddf->dlist = al;
4522
4523 /* As a device has been added, we should check
4524 * for any degraded devices that might make
4525 * use of this spare */
4526 for (a = st->arrays ; a; a=a->next)
4527 a->check_degraded = 1;
4528 }
4529 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4530
4531 if (update->len != (sizeof(struct virtual_disk) +
4532 sizeof(struct virtual_entry)))
4533 return;
4534 vd = (struct virtual_disk*)update->buf;
4535
4536 if (vd->entries[0].state == DDF_state_deleted) {
4537 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4538 return;
4539 } else {
4540
4541 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4542 if (ent != DDF_NOTFOUND) {
4543 dprintf("%s: VD %s exists already in slot %d\n",
4544 __func__, guid_str(vd->entries[0].guid),
4545 ent);
4546 return;
4547 }
4548 ent = find_unused_vde(ddf);
4549 if (ent == DDF_NOTFOUND)
4550 return;
4551 ddf->virt->entries[ent] = vd->entries[0];
4552 ddf->virt->populated_vdes =
4553 cpu_to_be16(
4554 1 + be16_to_cpu(
4555 ddf->virt->populated_vdes));
4556 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4557 __func__, guid_str(vd->entries[0].guid), ent,
4558 ddf->virt->entries[ent].state,
4559 ddf->virt->entries[ent].init_state);
4560 }
4561 ddf_set_updates_pending(ddf);
4562 }
4563
4564 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4565 vc = (struct vd_config*)update->buf;
4566 len = ddf->conf_rec_len * 512;
4567 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4568 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4569 __func__, guid_str(vc->guid), update->len,
4570 vc->sec_elmnt_count);
4571 return;
4572 }
4573 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4574 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4575 break;
4576 dprintf("%s: conf update for %s (%s)\n", __func__,
4577 guid_str(vc->guid), (vcl ? "old" : "new"));
4578 if (vcl) {
4579 /* An update, just copy the phys_refnum and lba_offset
4580 * fields
4581 */
4582 unsigned int i;
4583 unsigned int k;
4584 copy_matching_bvd(ddf, &vcl->conf, update);
4585 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4586 dprintf("BVD %u has %08x at %llu\n", 0,
4587 be32_to_cpu(vcl->conf.phys_refnum[k]),
4588 be64_to_cpu(LBA_OFFSET(ddf,
4589 &vcl->conf)[k]));
4590 for (i = 1; i < vc->sec_elmnt_count; i++) {
4591 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4592 update);
4593 for (k = 0; k < be16_to_cpu(
4594 vc->prim_elmnt_count); k++)
4595 dprintf("BVD %u has %08x at %llu\n", i,
4596 be32_to_cpu
4597 (vcl->other_bvds[i-1]->
4598 phys_refnum[k]),
4599 be64_to_cpu
4600 (LBA_OFFSET
4601 (ddf,
4602 vcl->other_bvds[i-1])[k]));
4603 }
4604 } else {
4605 /* A new VD_CONF */
4606 unsigned int i;
4607 if (!update->space)
4608 return;
4609 vcl = update->space;
4610 update->space = NULL;
4611 vcl->next = ddf->conflist;
4612 memcpy(&vcl->conf, vc, len);
4613 ent = find_vde_by_guid(ddf, vc->guid);
4614 if (ent == DDF_NOTFOUND)
4615 return;
4616 vcl->vcnum = ent;
4617 ddf->conflist = vcl;
4618 for (i = 1; i < vc->sec_elmnt_count; i++)
4619 memcpy(vcl->other_bvds[i-1],
4620 update->buf + len * i, len);
4621 }
4622 /* Set DDF_Transition on all Failed devices - to help
4623 * us detect those that are no longer in use
4624 */
4625 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4626 pdnum++)
4627 if (be16_and(ddf->phys->entries[pdnum].state,
4628 cpu_to_be16(DDF_Failed)))
4629 be16_set(ddf->phys->entries[pdnum].state,
4630 cpu_to_be16(DDF_Transition));
4631 /* Now make sure vlist is correct for each dl. */
4632 for (dl = ddf->dlist; dl; dl = dl->next) {
4633 unsigned int vn = 0;
4634 int in_degraded = 0;
4635 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4636 unsigned int dn, ibvd;
4637 const struct vd_config *conf;
4638 int vstate;
4639 dn = get_pd_index_from_refnum(vcl,
4640 dl->disk.refnum,
4641 ddf->mppe,
4642 &conf, &ibvd);
4643 if (dn == DDF_NOTFOUND)
4644 continue;
4645 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4646 dl->pdnum,
4647 be32_to_cpu(dl->disk.refnum),
4648 guid_str(conf->guid),
4649 conf->sec_elmnt_seq, vn);
4650 /* Clear the Transition flag */
4651 if (be16_and
4652 (ddf->phys->entries[dl->pdnum].state,
4653 cpu_to_be16(DDF_Failed)))
4654 be16_clear(ddf->phys
4655 ->entries[dl->pdnum].state,
4656 cpu_to_be16(DDF_Transition));
4657 dl->vlist[vn++] = vcl;
4658 vstate = ddf->virt->entries[vcl->vcnum].state
4659 & DDF_state_mask;
4660 if (vstate == DDF_state_degraded ||
4661 vstate == DDF_state_part_optimal)
4662 in_degraded = 1;
4663 }
4664 while (vn < ddf->max_part)
4665 dl->vlist[vn++] = NULL;
4666 if (dl->vlist[0]) {
4667 be16_clear(ddf->phys->entries[dl->pdnum].type,
4668 cpu_to_be16(DDF_Global_Spare));
4669 if (!be16_and(ddf->phys
4670 ->entries[dl->pdnum].type,
4671 cpu_to_be16(DDF_Active_in_VD))) {
4672 be16_set(ddf->phys
4673 ->entries[dl->pdnum].type,
4674 cpu_to_be16(DDF_Active_in_VD));
4675 if (in_degraded)
4676 be16_set(ddf->phys
4677 ->entries[dl->pdnum]
4678 .state,
4679 cpu_to_be16
4680 (DDF_Rebuilding));
4681 }
4682 }
4683 if (dl->spare) {
4684 be16_clear(ddf->phys->entries[dl->pdnum].type,
4685 cpu_to_be16(DDF_Global_Spare));
4686 be16_set(ddf->phys->entries[dl->pdnum].type,
4687 cpu_to_be16(DDF_Spare));
4688 }
4689 if (!dl->vlist[0] && !dl->spare) {
4690 be16_set(ddf->phys->entries[dl->pdnum].type,
4691 cpu_to_be16(DDF_Global_Spare));
4692 be16_clear(ddf->phys->entries[dl->pdnum].type,
4693 cpu_to_be16(DDF_Spare));
4694 be16_clear(ddf->phys->entries[dl->pdnum].type,
4695 cpu_to_be16(DDF_Active_in_VD));
4696 }
4697 }
4698
4699 /* Now remove any 'Failed' devices that are not part
4700 * of any VD. They will have the Transition flag set.
4701 * Once done, we need to update all dl->pdnum numbers.
4702 */
4703 pd2 = 0;
4704 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4705 pdnum++) {
4706 if (be16_and(ddf->phys->entries[pdnum].state,
4707 cpu_to_be16(DDF_Failed))
4708 && be16_and(ddf->phys->entries[pdnum].state,
4709 cpu_to_be16(DDF_Transition))) {
4710 /* skip this one unless it is in dlist */
4711 for (dl = ddf->dlist; dl; dl = dl->next)
4712 if (dl->pdnum == (int)pdnum)
4713 break;
4714 if (!dl)
4715 continue;
4716 }
4717 if (pdnum == pd2)
4718 pd2++;
4719 else {
4720 ddf->phys->entries[pd2] =
4721 ddf->phys->entries[pdnum];
4722 for (dl = ddf->dlist; dl; dl = dl->next)
4723 if (dl->pdnum == (int)pdnum)
4724 dl->pdnum = pd2;
4725 pd2++;
4726 }
4727 }
4728 ddf->phys->used_pdes = cpu_to_be16(pd2);
4729 while (pd2 < pdnum) {
4730 memset(ddf->phys->entries[pd2].guid, 0xff,
4731 DDF_GUID_LEN);
4732 pd2++;
4733 }
4734
4735 ddf_set_updates_pending(ddf);
4736 }
4737 /* case DDF_SPARE_ASSIGN_MAGIC */
4738 }
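/* Illustrative sketch only: composing the DDF_VIRT_RECORDS_MAGIC update
 * described in the comment above, as a creating process might queue it via
 * st->update_tail for the monitor. It mirrors the layout used by
 * kill_subarray_ddf(); the helper name and its 'guid' argument are
 * assumptions for illustration, and a real caller would also fill in the
 * entry's name, state and init_state fields.
 */
static void __attribute__((unused))
example_queue_new_vd_update(struct supertype *st, const char *guid)
{
	/* One virtual_disk header followed by a single virtual_entry. */
	int len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
	struct virtual_disk *vd = xcalloc(1, len);

	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	memcpy(vd->entries[0].guid, guid, DDF_GUID_LEN);
	/* ddf_process_update() will pick a free slot via find_unused_vde() */
	append_metadata_update(st, vd, len);
}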
4739
4740 static void ddf_prepare_update(struct supertype *st,
4741 struct metadata_update *update)
4742 {
4743 /* This update arrived at managemon.
4744 * We are about to pass it to monitor.
4745 * If a malloc is needed, do it here.
4746 */
4747 struct ddf_super *ddf = st->sb;
4748 be32 *magic = (be32 *)update->buf;
4749 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4750 struct vcl *vcl;
4751 struct vd_config *conf = (struct vd_config *) update->buf;
4752 if (posix_memalign(&update->space, 512,
4753 offsetof(struct vcl, conf)
4754 + ddf->conf_rec_len * 512) != 0) {
4755 update->space = NULL;
4756 return;
4757 }
4758 vcl = update->space;
4759 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4760 if (alloc_other_bvds(ddf, vcl) != 0) {
4761 free(update->space);
4762 update->space = NULL;
4763 }
4764 }
4765 }
4766
4767 /*
4768 * Check degraded state of a RAID10.
4769 * Returns 2 for good, 1 for degraded, 0 for failed, and -1 for error.
4770 */
4771 static int raid10_degraded(struct mdinfo *info)
4772 {
4773 int n_prim, n_bvds;
4774 int i;
4775 struct mdinfo *d;
4776 char *found;
4777 int ret = -1;
4778
4779 n_prim = info->array.layout & ~0x100;
4780 n_bvds = info->array.raid_disks / n_prim;
4781 found = xmalloc(n_bvds);
4782 if (found == NULL)
4783 return ret;
4784 memset(found, 0, n_bvds);
4785 for (d = info->devs; d; d = d->next) {
4786 i = d->disk.raid_disk / n_prim;
4787 if (i >= n_bvds) {
4788 pr_err("%s: BUG: invalid raid disk\n", __func__);
4789 goto out;
4790 }
4791 if (d->state_fd >= 0)
4792 found[i]++;
4793 }
4794 ret = 2;
4795 for (i = 0; i < n_bvds; i++)
4796 if (!found[i]) {
4797 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4798 ret = 0;
4799 goto out;
4800 } else if (found[i] < n_prim) {
4801 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4802 n_bvds);
4803 ret = 1;
4804 }
4805 out:
4806 free(found);
4807 return ret;
4808 }
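/* Worked example of the layout arithmetic used above, assuming the DDF
 * RAID10 layout value 0x102 ("near-2") reported by ddf_level_to_layout()
 * later in this file. The helper name is an assumption for illustration.
 */
static int __attribute__((unused))
example_raid10_bvd_count(int layout, int raid_disks)
{
	int n_prim = layout & ~0x100;	/* e.g. 0x102 -> 2 copies  */

	return raid_disks / n_prim;	/* e.g. 4 disks -> 2 BVDs  */
}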
4809
4810 /*
4811 * Check if the array 'a' is degraded but not failed.
4812 * If it is, find as many spares as are available and needed and
4813 * arrange for their inclusion.
4814 * We only choose devices which are not already in the array,
4815 * and prefer those with a spare-assignment to this array;
4816 * otherwise we choose global spares - assuming always that
4817 * there is enough room.
4818 * For each spare that we assign, we return an 'mdinfo' which
4819 * describes the position for the device in the array.
4820 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4821 * the new phys_refnum and lba_offset values.
4822 *
4823 * Only worry about BVDs at the moment.
4824 */
4825 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4826 struct metadata_update **updates)
4827 {
4828 int working = 0;
4829 struct mdinfo *d;
4830 struct ddf_super *ddf = a->container->sb;
4831 int global_ok = 0;
4832 struct mdinfo *rv = NULL;
4833 struct mdinfo *di;
4834 struct metadata_update *mu;
4835 struct dl *dl;
4836 int i;
4837 unsigned int j;
4838 struct vcl *vcl;
4839 struct vd_config *vc;
4840 unsigned int n_bvd;
4841
4842 for (d = a->info.devs ; d ; d = d->next) {
4843 if ((d->curr_state & DS_FAULTY) &&
4844 d->state_fd >= 0)
4845 /* wait for Removal to happen */
4846 return NULL;
4847 if (d->state_fd >= 0)
4848 working ++;
4849 }
4850
4851 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4852 a->info.array.raid_disks,
4853 a->info.array.level);
4854 if (working == a->info.array.raid_disks)
4855 return NULL; /* array not degraded */
4856 switch (a->info.array.level) {
4857 case 1:
4858 if (working == 0)
4859 return NULL; /* failed */
4860 break;
4861 case 4:
4862 case 5:
4863 if (working < a->info.array.raid_disks - 1)
4864 return NULL; /* failed */
4865 break;
4866 case 6:
4867 if (working < a->info.array.raid_disks - 2)
4868 return NULL; /* failed */
4869 break;
4870 case 10:
4871 if (raid10_degraded(&a->info) < 1)
4872 return NULL;
4873 break;
4874 default: /* concat or stripe */
4875 return NULL; /* failed */
4876 }
4877
4878 /* For each slot, if it is not working, find a spare */
4879 dl = ddf->dlist;
4880 for (i = 0; i < a->info.array.raid_disks; i++) {
4881 for (d = a->info.devs ; d ; d = d->next)
4882 if (d->disk.raid_disk == i)
4883 break;
4884 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4885 if (d && (d->state_fd >= 0))
4886 continue;
4887
4888 /* OK, this device needs recovery. Find a spare */
4889 again:
4890 for ( ; dl ; dl = dl->next) {
4891 unsigned long long esize;
4892 unsigned long long pos;
4893 struct mdinfo *d2;
4894 int is_global = 0;
4895 int is_dedicated = 0;
4896 struct extent *ex;
4897 unsigned int j;
4898 be16 state = ddf->phys->entries[dl->pdnum].state;
4899 if (be16_and(state,
4900 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4901 !be16_and(state,
4902 cpu_to_be16(DDF_Online)))
4903 continue;
4904
4905 /* If in this array, skip */
4906 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4907 if (d2->state_fd >= 0 &&
4908 d2->disk.major == dl->major &&
4909 d2->disk.minor == dl->minor) {
4910 dprintf("%x:%x (%08x) already in array\n",
4911 dl->major, dl->minor,
4912 be32_to_cpu(dl->disk.refnum));
4913 break;
4914 }
4915 if (d2)
4916 continue;
4917 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4918 cpu_to_be16(DDF_Spare))) {
4919 /* Check spare assign record */
4920 if (dl->spare) {
4921 if (dl->spare->type & DDF_spare_dedicated) {
4922 /* check spare_ents for guid */
4923 for (j = 0 ;
4924 j < be16_to_cpu
4925 (dl->spare
4926 ->populated);
4927 j++) {
4928 if (memcmp(dl->spare->spare_ents[j].guid,
4929 ddf->virt->entries[a->info.container_member].guid,
4930 DDF_GUID_LEN) == 0)
4931 is_dedicated = 1;
4932 }
4933 } else
4934 is_global = 1;
4935 }
4936 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4937 cpu_to_be16(DDF_Global_Spare))) {
4938 is_global = 1;
4939 } else if (!be16_and(ddf->phys
4940 ->entries[dl->pdnum].state,
4941 cpu_to_be16(DDF_Failed))) {
4942 /* we can possibly use some of this */
4943 is_global = 1;
4944 }
4945 if (!(is_dedicated ||
4946 (is_global && global_ok))) {
4947 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4948 is_dedicated, is_global);
4949 continue;
4950 }
4951
4952 /* We are allowed to use this device - is there space?
4953 * We need a->info.component_size sectors */
4954 ex = get_extents(ddf, dl);
4955 if (!ex) {
4956 dprintf("cannot get extents\n");
4957 continue;
4958 }
4959 j = 0; pos = 0;
4960 esize = 0;
4961
4962 do {
4963 esize = ex[j].start - pos;
4964 if (esize >= a->info.component_size)
4965 break;
4966 pos = ex[j].start + ex[j].size;
4967 j++;
4968 } while (ex[j-1].size);
4969
4970 free(ex);
4971 if (esize < a->info.component_size) {
4972 dprintf("%x:%x has no room: %llu %llu\n",
4973 dl->major, dl->minor,
4974 esize, a->info.component_size);
4975 /* No room */
4976 continue;
4977 }
4978
4979 /* Cool, we have a device with some space at pos */
4980 di = xcalloc(1, sizeof(*di));
4981 di->disk.number = i;
4982 di->disk.raid_disk = i;
4983 di->disk.major = dl->major;
4984 di->disk.minor = dl->minor;
4985 di->disk.state = 0;
4986 di->recovery_start = 0;
4987 di->data_offset = pos;
4988 di->component_size = a->info.component_size;
4989 di->container_member = dl->pdnum;
4990 di->next = rv;
4991 rv = di;
4992 dprintf("%x:%x (%08x) to be %d at %llu\n",
4993 dl->major, dl->minor,
4994 be32_to_cpu(dl->disk.refnum), i, pos);
4995
4996 break;
4997 }
4998 if (!dl && !global_ok) {
4999 /* not enough dedicated spares, try global */
5000 global_ok = 1;
5001 dl = ddf->dlist;
5002 goto again;
5003 }
5004 }
5005
5006 if (!rv)
5007 /* No spares found */
5008 return rv;
5009 /* Now 'rv' has a list of devices to return.
5010 * Create a metadata_update record to update the
5011 * phys_refnum and lba_offset values
5012 */
5013 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5014 &n_bvd, &vcl);
5015 if (vc == NULL)
5016 return NULL;
5017
5018 mu = xmalloc(sizeof(*mu));
5019 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5020 free(mu);
5021 return NULL;
5022 }
5023
5024 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5025 mu->buf = xmalloc(mu->len);
5026 mu->space = NULL;
5027 mu->space_list = NULL;
5028 mu->next = *updates;
5029 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5030 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5031 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5032 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5033
5034 vc = (struct vd_config*)mu->buf;
5035 for (di = rv ; di ; di = di->next) {
5036 unsigned int i_sec, i_prim;
5037 i_sec = di->disk.raid_disk
5038 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5039 i_prim = di->disk.raid_disk
5040 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5041 vc = (struct vd_config *)(mu->buf
5042 + i_sec * ddf->conf_rec_len * 512);
5043 for (dl = ddf->dlist; dl; dl = dl->next)
5044 if (dl->major == di->disk.major
5045 && dl->minor == di->disk.minor)
5046 break;
5047 if (!dl) {
5048 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5049 __func__, di->disk.raid_disk,
5050 di->disk.major, di->disk.minor);
5051 return NULL;
5052 }
5053 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5054 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5055 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5056 be32_to_cpu(vc->phys_refnum[i_prim]),
5057 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5058 }
5059 *updates = mu;
5060 return rv;
5061 }
5062 #endif /* MDASSEMBLE */
5063
5064 static int ddf_level_to_layout(int level)
5065 {
5066 switch(level) {
5067 case 0:
5068 case 1:
5069 return 0;
5070 case 5:
5071 return ALGORITHM_LEFT_SYMMETRIC;
5072 case 6:
5073 return ALGORITHM_ROTATING_N_CONTINUE;
5074 case 10:
5075 return 0x102;
5076 default:
5077 return UnSet;
5078 }
5079 }
5080
5081 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5082 {
5083 if (level && *level == UnSet)
5084 *level = LEVEL_CONTAINER;
5085
5086 if (level && layout && *layout == UnSet)
5087 *layout = ddf_level_to_layout(*level);
5088 }
5089
5090 struct superswitch super_ddf = {
5091 #ifndef MDASSEMBLE
5092 .examine_super = examine_super_ddf,
5093 .brief_examine_super = brief_examine_super_ddf,
5094 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5095 .export_examine_super = export_examine_super_ddf,
5096 .detail_super = detail_super_ddf,
5097 .brief_detail_super = brief_detail_super_ddf,
5098 .validate_geometry = validate_geometry_ddf,
5099 .write_init_super = write_init_super_ddf,
5100 .add_to_super = add_to_super_ddf,
5101 .remove_from_super = remove_from_super_ddf,
5102 .load_container = load_container_ddf,
5103 .copy_metadata = copy_metadata_ddf,
5104 .kill_subarray = kill_subarray_ddf,
5105 #endif
5106 .match_home = match_home_ddf,
5107 .uuid_from_super= uuid_from_super_ddf,
5108 .getinfo_super = getinfo_super_ddf,
5109 .update_super = update_super_ddf,
5110
5111 .avail_size = avail_size_ddf,
5112
5113 .compare_super = compare_super_ddf,
5114
5115 .load_super = load_super_ddf,
5116 .init_super = init_super_ddf,
5117 .store_super = store_super_ddf,
5118 .free_super = free_super_ddf,
5119 .match_metadata_desc = match_metadata_desc_ddf,
5120 .container_content = container_content_ddf,
5121 .default_geometry = default_geometry_ddf,
5122
5123 .external = 1,
5124
5125 #ifndef MDASSEMBLE
5126 /* for mdmon */
5127 .open_new = ddf_open_new,
5128 .set_array_state= ddf_set_array_state,
5129 .set_disk = ddf_set_disk,
5130 .sync_metadata = ddf_sync_metadata,
5131 .process_update = ddf_process_update,
5132 .prepare_update = ddf_prepare_update,
5133 .activate_spare = ddf_activate_spare,
5134 #endif
5135 .name = "ddf",
5136 };