git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
Commit: DDF: handle fake RAIDs with changing subarray UUIDs
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * the seconds in the decade of the seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
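/*
 * Editor's illustrative sketch (not part of the original source; kept out of
 * the build with #if 0): converting between a DDF timestamp (seconds since
 * 1980-01-01) and a Unix time_t using DECADE above.
 */
#if 0
#include <time.h>

static time_t ddf_time_to_unix(__u32 ddf_timestamp)
{
	/* DDF counts from 1980; Unix from 1970 - DECADE is the difference */
	return (time_t)ddf_timestamp + DECADE;
}

static __u32 unix_time_to_ddf(time_t t)
{
	return (__u32)(t - DECADE);
}
#endif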
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
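/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): locating the anchor described above.  It reads the last
 * 512-byte block of the device and checks the big-endian DDF header magic
 * (0xDE11DE11, DDF_HEADER_MAGIC below).  load_ddf_headers() further down
 * does the real work, including the CRC check.
 */
#if 0
static int has_ddf_anchor(int fd, unsigned long long devsize_bytes)
{
	unsigned char block[512];

	if (lseek64(fd, devsize_bytes - 512, 0) < 0)
		return 0;
	if (read(fd, block, 512) != 512)
		return 0;
	/* the magic is stored big-endian in the first 4 bytes */
	return block[0] == 0xDE && block[1] == 0x11 &&
	       block[2] == 0xDE && block[3] == 0x11;
}
#endif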
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
104
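/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): the struct wrappers above exist so on-disk big-endian fields
 * cannot be mixed up with native-endian integers by accident - the compiler
 * forces every access through the helpers.  (struct ddf_header is defined
 * further down in this file.)
 */
#if 0
static void be_wrapper_example(struct ddf_header *hdr)
{
	__u32 seq = be32_to_cpu(hdr->seq);	/* explicit conversion to CPU order */

	hdr->seq = cpu_to_be32(seq + 1);	/* and back before writing out */

	if (be32_eq(hdr->magic, DDF_HEADER_MAGIC))	/* compare via be32_eq(), not '==' */
		printf("valid DDF header, seq now %u\n", be32_to_cpu(hdr->seq));

	/* hdr->seq == seq + 1  would not even compile: _v32 is hidden in a struct */
}
#endif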
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
206
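/*
 * Editor's note (not part of the original source): a worked example for the
 * config_record_len field above.  Each primary element of a vd_config record
 * needs 12 bytes (a 4-byte phys_refnum plus an 8-byte LBA offset, see the
 * LBA_OFFSET() macro below), so with max_primary_element_entries == 1024:
 *
 *	config_record_len = 1 + ROUNDUP(1024 * 12 / 512)
 *			  = 1 + 24 = 25 sectors
 *
 * i.e. one sector of fixed vd_config fields plus 24 sectors of per-element
 * data.
 */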
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
310 #define DDF_init_quick 0x01 /* initialisation in progress.
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records each config_record_len sectors
321 * They can be vd_config or spare_assign
322 */
323
324 struct vd_config {
325 be32 magic; /* DDF_VD_CONF_MAGIC */
326 be32 crc;
327 char guid[DDF_GUID_LEN];
328 be32 timestamp;
329 be32 seqnum;
330 __u8 pad0[24];
331 be16 prim_elmnt_count;
332 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
333 __u8 prl;
334 __u8 rlq;
335 __u8 sec_elmnt_count;
336 __u8 sec_elmnt_seq;
337 __u8 srl;
338 be64 blocks; /* blocks per component could be different
339 * on different component devices...(only
340 * for concat I hope) */
341 be64 array_blocks; /* blocks in array */
342 __u8 pad1[8];
343 be32 spare_refs[8];
344 __u8 cache_pol[8];
345 __u8 bg_rate;
346 __u8 pad2[3];
347 __u8 pad3[52];
348 __u8 pad4[192];
349 __u8 v0[32]; /* reserved- 0xff */
350 __u8 v1[32]; /* reserved- 0xff */
351 __u8 v2[16]; /* reserved- 0xff */
352 __u8 v3[16]; /* reserved- 0xff */
353 __u8 vendor[32];
354 be32 phys_refnum[0]; /* refnum of each disk in sequence */
355 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
356 bvd are always the same size */
357 };
358 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
359
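/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): how the variable-length tail of a vd_config is addressed.
 * phys_refnum[] holds one 32-bit reference per primary element, and the
 * per-element start LBAs follow immediately after it - which is what the
 * LBA_OFFSET() macro above computes.  (struct ddf_super is defined below.)
 */
#if 0
static void show_bvd_members(struct ddf_super *ddf, struct vd_config *vd)
{
	unsigned int i;

	for (i = 0; i < be16_to_cpu(vd->prim_elmnt_count); i++)
		printf("element %u: refnum %08x starts at LBA %llu\n", i,
		       be32_to_cpu(vd->phys_refnum[i]),
		       (unsigned long long)be64_to_cpu(LBA_OFFSET(ddf, vd)[i]));
}
#endif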
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
369 struct spare_assign {
370 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
371 be32 crc;
372 be32 timestamp;
373 __u8 reserved[7];
374 __u8 type;
375 be16 populated; /* SAEs used */
376 be16 max; /* max SAEs */
377 __u8 pad[8];
378 struct spare_assign_entry {
379 char guid[DDF_GUID_LEN];
380 be16 secondary_element;
381 __u8 pad[6];
382 } spare_ents[0];
383 };
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
391 struct disk_data {
392 be32 magic; /* DDF_PHYS_DATA_MAGIC */
393 be32 crc;
394 char guid[DDF_GUID_LEN];
395 be32 refnum; /* crc of some magic drive data ... */
396 __u8 forced_ref; /* set when above was not result of magic */
397 __u8 forced_guid; /* set if guid was forced rather than magic */
398 __u8 vendor[32];
399 __u8 pad[442];
400 };
401
402 /* bbm_section content */
403 struct bad_block_log {
404 be32 magic;
405 be32 crc;
406 be16 entry_count;
407 be32 spare_count;
408 __u8 pad[10];
409 be64 first_spare;
410 struct mapped_block {
411 be64 defective_start;
412 be32 replacement_start;
413 be16 remap_count;
414 __u8 pad[2];
415 } entries[0];
416 };
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
435 * Note that the ddf_super has space for the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
440 struct ddf_super {
441 struct ddf_header anchor, primary, secondary;
442 struct ddf_controller_data controller;
443 struct ddf_header *active;
444 struct phys_disk *phys;
445 struct virtual_disk *virt;
446 char *conf;
447 int pdsize, vdsize;
448 unsigned int max_part, mppe, conf_rec_len;
449 int currentdev;
450 int updates_pending;
451 struct vcl {
452 union {
453 char space[512];
454 struct {
455 struct vcl *next;
456 unsigned int vcnum; /* index into ->virt */
457 struct vd_config **other_bvds;
458 __u64 *block_sizes; /* NULL if all the same */
459 };
460 };
461 struct vd_config conf;
462 } *conflist, *currentconf;
463 struct dl {
464 union {
465 char space[512];
466 struct {
467 struct dl *next;
468 int major, minor;
469 char *devname;
470 int fd;
471 unsigned long long size; /* sectors */
472 be64 primary_lba; /* sectors */
473 be64 secondary_lba; /* sectors */
474 be64 workspace_lba; /* sectors */
475 int pdnum; /* index in ->phys */
476 struct spare_assign *spare;
477 void *mdupdate; /* hold metadata update */
478
479 /* These fields used by auto-layout */
480 int raiddisk; /* slot to fill in autolayout */
481 __u64 esize;
482 };
483 };
484 struct disk_data disk;
485 struct vcl *vlist[0]; /* max_part in size */
486 } *dlist, *add_list;
487 };
488
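/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): walking the in-memory lists described above - every physical
 * disk on ->dlist and, per disk, the virtual-disk configuration records in
 * its ->vlist[] that were found on it.
 */
#if 0
static void dump_ddf_super(const struct ddf_super *ddf)
{
	const struct dl *dl;
	unsigned int i;

	for (dl = ddf->dlist; dl; dl = dl->next) {
		printf("disk %d:%d pdnum %d\n", dl->major, dl->minor, dl->pdnum);
		for (i = 0; i < ddf->max_part; i++)
			if (dl->vlist[i])
				printf("  member of VD %u (%.24s)\n",
				       dl->vlist[i]->vcnum,
				       dl->vlist[i]->conf.guid);
	}
}
#endif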
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
493 #if DEBUG
494 static int all_ff(const char *guid);
495 static void pr_state(struct ddf_super *ddf, const char *msg)
496 {
497 unsigned int i;
498 dprintf("%s/%s: ", __func__, msg);
499 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
500 if (all_ff(ddf->virt->entries[i].guid))
501 continue;
502 dprintf("%u(s=%02x i=%02x) ", i,
503 ddf->virt->entries[i].state,
504 ddf->virt->entries[i].init_state);
505 }
506 dprintf("\n");
507 }
508 #else
509 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
510 #endif
511
512 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
513 {
514 ddf->updates_pending = 1;
515 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
516 pr_state(ddf, func);
517 }
518
519 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
520
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525
526 static be32 calc_crc(void *buf, int len)
527 {
528 /* crcs are always at the same place as in the ddf_header */
529 struct ddf_header *ddf = buf;
530 be32 oldcrc = ddf->crc;
531 __u32 newcrc;
532 ddf->crc = cpu_to_be32(0xffffffff);
533
534 newcrc = crc32(0, buf, len);
535 ddf->crc = oldcrc;
536 /* The crc is stored (like everything) bigendian, so convert
537 * here for simplicity
538 */
539 return cpu_to_be32(newcrc);
540 }
541
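/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): calc_crc() is used both ways in this file - to verify a
 * section read from disk and to refresh the crc field before writing one
 * back out.  The crc field itself is treated as 0xffffffff while the
 * checksum is computed, as shown above.
 */
#if 0
static int header_crc_ok(struct ddf_header *hdr)
{
	return be32_eq(calc_crc(hdr, 512), hdr->crc);	/* verify on read */
}

static void header_crc_update(struct ddf_header *hdr)
{
	hdr->crc = calc_crc(hdr, 512);			/* refresh before write */
}
#endif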
542 #define DDF_INVALID_LEVEL 0xff
543 #define DDF_NO_SECONDARY 0xff
544 static int err_bad_md_layout(const mdu_array_info_t *array)
545 {
546 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
547 array->level, array->layout, array->raid_disks);
548 return -1;
549 }
550
551 static int layout_md2ddf(const mdu_array_info_t *array,
552 struct vd_config *conf)
553 {
554 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
555 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
556 __u8 sec_elmnt_count = 1;
557 __u8 srl = DDF_NO_SECONDARY;
558
559 switch (array->level) {
560 case LEVEL_LINEAR:
561 prl = DDF_CONCAT;
562 break;
563 case 0:
564 rlq = DDF_RAID0_SIMPLE;
565 prl = DDF_RAID0;
566 break;
567 case 1:
568 switch (array->raid_disks) {
569 case 2:
570 rlq = DDF_RAID1_SIMPLE;
571 break;
572 case 3:
573 rlq = DDF_RAID1_MULTI;
574 break;
575 default:
576 return err_bad_md_layout(array);
577 }
578 prl = DDF_RAID1;
579 break;
580 case 4:
581 if (array->layout != 0)
582 return err_bad_md_layout(array);
583 rlq = DDF_RAID4_N;
584 prl = DDF_RAID4;
585 break;
586 case 5:
587 switch (array->layout) {
588 case ALGORITHM_LEFT_ASYMMETRIC:
589 rlq = DDF_RAID5_N_RESTART;
590 break;
591 case ALGORITHM_RIGHT_ASYMMETRIC:
592 rlq = DDF_RAID5_0_RESTART;
593 break;
594 case ALGORITHM_LEFT_SYMMETRIC:
595 rlq = DDF_RAID5_N_CONTINUE;
596 break;
597 case ALGORITHM_RIGHT_SYMMETRIC:
598 /* not mentioned in standard */
599 default:
600 return err_bad_md_layout(array);
601 }
602 prl = DDF_RAID5;
603 break;
604 case 6:
605 switch (array->layout) {
606 case ALGORITHM_ROTATING_N_RESTART:
607 rlq = DDF_RAID5_N_RESTART;
608 break;
609 case ALGORITHM_ROTATING_ZERO_RESTART:
610 rlq = DDF_RAID6_0_RESTART;
611 break;
612 case ALGORITHM_ROTATING_N_CONTINUE:
613 rlq = DDF_RAID5_N_CONTINUE;
614 break;
615 default:
616 return err_bad_md_layout(array);
617 }
618 prl = DDF_RAID6;
619 break;
620 case 10:
621 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
622 rlq = DDF_RAID1_SIMPLE;
623 prim_elmnt_count = cpu_to_be16(2);
624 sec_elmnt_count = array->raid_disks / 2;
625 } else if (array->raid_disks % 3 == 0
626 && array->layout == 0x103) {
627 rlq = DDF_RAID1_MULTI;
628 prim_elmnt_count = cpu_to_be16(3);
629 sec_elmnt_count = array->raid_disks / 3;
630 } else
631 return err_bad_md_layout(array);
632 srl = DDF_2SPANNED;
633 prl = DDF_RAID1;
634 break;
635 default:
636 return err_bad_md_layout(array);
637 }
638 conf->prl = prl;
639 conf->prim_elmnt_count = prim_elmnt_count;
640 conf->rlq = rlq;
641 conf->srl = srl;
642 conf->sec_elmnt_count = sec_elmnt_count;
643 return 0;
644 }
645
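/*
 * Editor's illustrative sketch (not part of the original source; disabled
 * with #if 0): mapping an md RAID5 left-symmetric array onto the DDF
 * PRL/RLQ fields with layout_md2ddf().  layout_ddf2md() below performs the
 * reverse mapping, so the two conversions should round-trip.
 */
#if 0
static void layout_example(void)
{
	mdu_array_info_t array = {
		.level = 5,
		.layout = ALGORITHM_LEFT_SYMMETRIC,
		.raid_disks = 4,
	};
	struct vd_config conf;

	if (layout_md2ddf(&array, &conf) == 0) {
		/* conf.prl == DDF_RAID5, conf.rlq == DDF_RAID5_N_CONTINUE,
		 * conf.prim_elmnt_count == cpu_to_be16(4),
		 * conf.sec_elmnt_count == 1, conf.srl == DDF_NO_SECONDARY
		 */
	}
}
#endif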
646 static int err_bad_ddf_layout(const struct vd_config *conf)
647 {
648 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
649 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
650 return -1;
651 }
652
653 static int layout_ddf2md(const struct vd_config *conf,
654 mdu_array_info_t *array)
655 {
656 int level = LEVEL_UNSUPPORTED;
657 int layout = 0;
658 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
659
660 if (conf->sec_elmnt_count > 1) {
661 /* see also check_secondary() */
662 if (conf->prl != DDF_RAID1 ||
663 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
664 pr_err("Unsupported secondary RAID level %u/%u\n",
665 conf->prl, conf->srl);
666 return -1;
667 }
668 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
669 layout = 0x102;
670 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
671 layout = 0x103;
672 else
673 return err_bad_ddf_layout(conf);
674 raiddisks *= conf->sec_elmnt_count;
675 level = 10;
676 goto good;
677 }
678
679 switch (conf->prl) {
680 case DDF_CONCAT:
681 level = LEVEL_LINEAR;
682 break;
683 case DDF_RAID0:
684 if (conf->rlq != DDF_RAID0_SIMPLE)
685 return err_bad_ddf_layout(conf);
686 level = 0;
687 break;
688 case DDF_RAID1:
689 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
690 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
691 return err_bad_ddf_layout(conf);
692 level = 1;
693 break;
694 case DDF_RAID4:
695 if (conf->rlq != DDF_RAID4_N)
696 return err_bad_ddf_layout(conf);
697 level = 4;
698 break;
699 case DDF_RAID5:
700 switch (conf->rlq) {
701 case DDF_RAID5_N_RESTART:
702 layout = ALGORITHM_LEFT_ASYMMETRIC;
703 break;
704 case DDF_RAID5_0_RESTART:
705 layout = ALGORITHM_RIGHT_ASYMMETRIC;
706 break;
707 case DDF_RAID5_N_CONTINUE:
708 layout = ALGORITHM_LEFT_SYMMETRIC;
709 break;
710 default:
711 return err_bad_ddf_layout(conf);
712 }
713 level = 5;
714 break;
715 case DDF_RAID6:
716 switch (conf->rlq) {
717 case DDF_RAID5_N_RESTART:
718 layout = ALGORITHM_ROTATING_N_RESTART;
719 break;
720 case DDF_RAID6_0_RESTART:
721 layout = ALGORITHM_ROTATING_ZERO_RESTART;
722 break;
723 case DDF_RAID5_N_CONTINUE:
724 layout = ALGORITHM_ROTATING_N_CONTINUE;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 }
729 level = 6;
730 break;
731 default:
732 return err_bad_ddf_layout(conf);
733 };
734
735 good:
736 array->level = level;
737 array->layout = layout;
738 array->raid_disks = raiddisks;
739 return 0;
740 }
741
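/*
 * Editor's note (not part of the original source): a worked example of the
 * secondary-level branch above.  A DDF configuration with prl == DDF_RAID1,
 * rlq == DDF_RAID1_SIMPLE, prim_elmnt_count == 2, srl == DDF_2SPANNED and
 * sec_elmnt_count == 3 (three spanned two-way mirrors) maps to md RAID10
 * with layout 0x102 ("near=2") and raid_disks = 2 * 3 = 6, matching the
 * inverse mapping in layout_md2ddf() above.
 */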
742 static int load_ddf_header(int fd, unsigned long long lba,
743 unsigned long long size,
744 int type,
745 struct ddf_header *hdr, struct ddf_header *anchor)
746 {
747 /* read a ddf header (primary or secondary) from fd/lba
748 * and check that it is consistent with anchor
749 * Need to check:
750 * magic, crc, guid, rev, the LBAs, and header_type, and
751 * everything after header_type must be the same
752 */
753 if (lba >= size-1)
754 return 0;
755
756 if (lseek64(fd, lba<<9, 0) < 0)
757 return 0;
758
759 if (read(fd, hdr, 512) != 512)
760 return 0;
761
762 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
763 pr_err("%s: bad header magic\n", __func__);
764 return 0;
765 }
766 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
767 pr_err("%s: bad CRC\n", __func__);
768 return 0;
769 }
770 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
771 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
772 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
773 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
774 hdr->type != type ||
775 memcmp(anchor->pad2, hdr->pad2, 512 -
776 offsetof(struct ddf_header, pad2)) != 0) {
777 pr_err("%s: header mismatch\n", __func__);
778 return 0;
779 }
780
781 /* Looks good enough to me... */
782 return 1;
783 }
784
785 static void *load_section(int fd, struct ddf_super *super, void *buf,
786 be32 offset_be, be32 len_be, int check)
787 {
788 unsigned long long offset = be32_to_cpu(offset_be);
789 unsigned long long len = be32_to_cpu(len_be);
790 int dofree = (buf == NULL);
791
792 if (check)
793 if (len != 2 && len != 8 && len != 32
794 && len != 128 && len != 512)
795 return NULL;
796
797 if (len > 1024)
798 return NULL;
799 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
800 buf = NULL;
801
802 if (!buf)
803 return NULL;
804
805 if (super->active->type == 1)
806 offset += be64_to_cpu(super->active->primary_lba);
807 else
808 offset += be64_to_cpu(super->active->secondary_lba);
809
810 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
811 if (dofree)
812 free(buf);
813 return NULL;
814 }
815 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
816 if (dofree)
817 free(buf);
818 return NULL;
819 }
820 return buf;
821 }
822
823 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
824 {
825 unsigned long long dsize;
826
827 get_dev_size(fd, NULL, &dsize);
828
829 if (lseek64(fd, dsize-512, 0) < 0) {
830 if (devname)
831 pr_err("Cannot seek to anchor block on %s: %s\n",
832 devname, strerror(errno));
833 return 1;
834 }
835 if (read(fd, &super->anchor, 512) != 512) {
836 if (devname)
837 pr_err("Cannot read anchor block on %s: %s\n",
838 devname, strerror(errno));
839 return 1;
840 }
841 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
842 if (devname)
843 pr_err("no DDF anchor found on %s\n",
844 devname);
845 return 2;
846 }
847 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
848 if (devname)
849 pr_err("bad CRC on anchor on %s\n",
850 devname);
851 return 2;
852 }
853 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
854 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
855 if (devname)
856 pr_err("can only support super revision"
857 " %.8s and earlier, not %.8s on %s\n",
858 DDF_REVISION_2, super->anchor.revision,devname);
859 return 2;
860 }
861 super->active = NULL;
862 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
863 dsize >> 9, 1,
864 &super->primary, &super->anchor) == 0) {
865 if (devname)
866 pr_err("Failed to load primary DDF header "
867 "on %s\n", devname);
868 } else
869 super->active = &super->primary;
870
871 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
872 dsize >> 9, 2,
873 &super->secondary, &super->anchor)) {
874 if (super->active == NULL
875 || (be32_to_cpu(super->primary.seq)
876 < be32_to_cpu(super->secondary.seq) &&
877 !super->secondary.openflag)
878 || (be32_to_cpu(super->primary.seq)
879 == be32_to_cpu(super->secondary.seq) &&
880 super->primary.openflag && !super->secondary.openflag)
881 )
882 super->active = &super->secondary;
883 } else if (devname)
884 pr_err("Failed to load secondary DDF header on %s\n",
885 devname);
886 if (super->active == NULL)
887 return 2;
888 return 0;
889 }
890
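/*
 * Editor's note (not part of the original source): how the active header is
 * chosen above.  The secondary header wins when it loaded successfully and
 * either the primary failed to load, or the secondary has a newer sequence
 * number and is not marked open, or both have the same sequence number but
 * only the primary is marked open.  For example, with primary.seq == 5
 * (open) and secondary.seq == 5 (not open), super->active points at the
 * secondary.
 */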
891 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
892 {
893 void *ok;
894 ok = load_section(fd, super, &super->controller,
895 super->active->controller_section_offset,
896 super->active->controller_section_length,
897 0);
898 super->phys = load_section(fd, super, NULL,
899 super->active->phys_section_offset,
900 super->active->phys_section_length,
901 1);
902 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
903
904 super->virt = load_section(fd, super, NULL,
905 super->active->virt_section_offset,
906 super->active->virt_section_length,
907 1);
908 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
909 if (!ok ||
910 !super->phys ||
911 !super->virt) {
912 free(super->phys);
913 free(super->virt);
914 super->phys = NULL;
915 super->virt = NULL;
916 return 2;
917 }
918 super->conflist = NULL;
919 super->dlist = NULL;
920
921 super->max_part = be16_to_cpu(super->active->max_partitions);
922 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
923 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
924 return 0;
925 }
926
927 #define DDF_UNUSED_BVD 0xff
928 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
929 {
930 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
931 unsigned int i, vdsize;
932 void *p;
933 if (n_vds == 0) {
934 vcl->other_bvds = NULL;
935 return 0;
936 }
937 vdsize = ddf->conf_rec_len * 512;
938 if (posix_memalign(&p, 512, n_vds *
939 (vdsize + sizeof(struct vd_config *))) != 0)
940 return -1;
941 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
942 for (i = 0; i < n_vds; i++) {
943 vcl->other_bvds[i] = p + i * vdsize;
944 memset(vcl->other_bvds[i], 0, vdsize);
945 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
946 }
947 return 0;
948 }
949
950 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
951 unsigned int len)
952 {
953 int i;
954 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
955 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
956 break;
957
958 if (i < vcl->conf.sec_elmnt_count-1) {
959 if (be32_to_cpu(vd->seqnum) <=
960 be32_to_cpu(vcl->other_bvds[i]->seqnum))
961 return;
962 } else {
963 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
964 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
965 break;
966 if (i == vcl->conf.sec_elmnt_count-1) {
967 pr_err("no space for sec level config %u, count is %u\n",
968 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
969 return;
970 }
971 }
972 memcpy(vcl->other_bvds[i], vd, len);
973 }
974
975 static int load_ddf_local(int fd, struct ddf_super *super,
976 char *devname, int keep)
977 {
978 struct dl *dl;
979 struct stat stb;
980 char *conf;
981 unsigned int i;
982 unsigned int confsec;
983 int vnum;
984 unsigned int max_virt_disks = be16_to_cpu
985 (super->active->max_vd_entries);
986 unsigned long long dsize;
987
988 /* First the local disk info */
989 if (posix_memalign((void**)&dl, 512,
990 sizeof(*dl) +
991 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
992 pr_err("%s could not allocate disk info buffer\n",
993 __func__);
994 return 1;
995 }
996
997 load_section(fd, super, &dl->disk,
998 super->active->data_section_offset,
999 super->active->data_section_length,
1000 0);
1001 dl->devname = devname ? xstrdup(devname) : NULL;
1002
1003 fstat(fd, &stb);
1004 dl->major = major(stb.st_rdev);
1005 dl->minor = minor(stb.st_rdev);
1006 dl->next = super->dlist;
1007 dl->fd = keep ? fd : -1;
1008
1009 dl->size = 0;
1010 if (get_dev_size(fd, devname, &dsize))
1011 dl->size = dsize >> 9;
1012 /* If the disks have different sizes, the LBAs will differ
1013 * between phys disks.
1014 * At this point here, the values in super->active must be valid
1015 * for this phys disk. */
1016 dl->primary_lba = super->active->primary_lba;
1017 dl->secondary_lba = super->active->secondary_lba;
1018 dl->workspace_lba = super->active->workspace_lba;
1019 dl->spare = NULL;
1020 for (i = 0 ; i < super->max_part ; i++)
1021 dl->vlist[i] = NULL;
1022 super->dlist = dl;
1023 dl->pdnum = -1;
1024 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1025 if (memcmp(super->phys->entries[i].guid,
1026 dl->disk.guid, DDF_GUID_LEN) == 0)
1027 dl->pdnum = i;
1028
1029 /* Now the config list. */
1030 /* 'conf' is an array of config entries, some of which are
1031 * probably invalid. Those which are good need to be copied into
1032 * the conflist
1033 */
1034
1035 conf = load_section(fd, super, super->conf,
1036 super->active->config_section_offset,
1037 super->active->config_section_length,
1038 0);
1039 super->conf = conf;
1040 vnum = 0;
1041 for (confsec = 0;
1042 confsec < be32_to_cpu(super->active->config_section_length);
1043 confsec += super->conf_rec_len) {
1044 struct vd_config *vd =
1045 (struct vd_config *)((char*)conf + confsec*512);
1046 struct vcl *vcl;
1047
1048 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1049 if (dl->spare)
1050 continue;
1051 if (posix_memalign((void**)&dl->spare, 512,
1052 super->conf_rec_len*512) != 0) {
1053 pr_err("%s could not allocate spare info buf\n",
1054 __func__);
1055 return 1;
1056 }
1057
1058 memcpy(dl->spare, vd, super->conf_rec_len*512);
1059 continue;
1060 }
1061 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1062 continue;
1063 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1064 if (memcmp(vcl->conf.guid,
1065 vd->guid, DDF_GUID_LEN) == 0)
1066 break;
1067 }
1068
1069 if (vcl) {
1070 dl->vlist[vnum++] = vcl;
1071 if (vcl->other_bvds != NULL &&
1072 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1073 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1074 continue;
1075 }
1076 if (be32_to_cpu(vd->seqnum) <=
1077 be32_to_cpu(vcl->conf.seqnum))
1078 continue;
1079 } else {
1080 if (posix_memalign((void**)&vcl, 512,
1081 (super->conf_rec_len*512 +
1082 offsetof(struct vcl, conf))) != 0) {
1083 pr_err("%s could not allocate vcl buf\n",
1084 __func__);
1085 return 1;
1086 }
1087 vcl->next = super->conflist;
1088 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1089 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1090 if (alloc_other_bvds(super, vcl) != 0) {
1091 pr_err("%s could not allocate other bvds\n",
1092 __func__);
1093 free(vcl);
1094 return 1;
1095 };
1096 super->conflist = vcl;
1097 dl->vlist[vnum++] = vcl;
1098 }
1099 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1100 for (i=0; i < max_virt_disks ; i++)
1101 if (memcmp(super->virt->entries[i].guid,
1102 vcl->conf.guid, DDF_GUID_LEN)==0)
1103 break;
1104 if (i < max_virt_disks)
1105 vcl->vcnum = i;
1106 }
1107
1108 return 0;
1109 }
1110
1111 #ifndef MDASSEMBLE
1112 static int load_super_ddf_all(struct supertype *st, int fd,
1113 void **sbp, char *devname);
1114 #endif
1115
1116 static void free_super_ddf(struct supertype *st);
1117
1118 static int load_super_ddf(struct supertype *st, int fd,
1119 char *devname)
1120 {
1121 unsigned long long dsize;
1122 struct ddf_super *super;
1123 int rv;
1124
1125 if (get_dev_size(fd, devname, &dsize) == 0)
1126 return 1;
1127
1128 if (!st->ignore_hw_compat && test_partition(fd))
1129 /* DDF is not allowed on partitions */
1130 return 1;
1131
1132 /* 32M is a lower bound */
1133 if (dsize <= 32*1024*1024) {
1134 if (devname)
1135 pr_err("%s is too small for ddf: "
1136 "size is %llu sectors.\n",
1137 devname, dsize>>9);
1138 return 1;
1139 }
1140 if (dsize & 511) {
1141 if (devname)
1142 pr_err("%s is an odd size for ddf: "
1143 "size is %llu bytes.\n",
1144 devname, dsize);
1145 return 1;
1146 }
1147
1148 free_super_ddf(st);
1149
1150 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1151 pr_err("malloc of %zu failed.\n",
1152 sizeof(*super));
1153 return 1;
1154 }
1155 memset(super, 0, sizeof(*super));
1156
1157 rv = load_ddf_headers(fd, super, devname);
1158 if (rv) {
1159 free(super);
1160 return rv;
1161 }
1162
1163 /* Have valid headers and have chosen the best. Let's read in the rest*/
1164
1165 rv = load_ddf_global(fd, super, devname);
1166
1167 if (rv) {
1168 if (devname)
1169 pr_err("Failed to load all information "
1170 "sections on %s\n", devname);
1171 free(super);
1172 return rv;
1173 }
1174
1175 rv = load_ddf_local(fd, super, devname, 0);
1176
1177 if (rv) {
1178 if (devname)
1179 pr_err("Failed to load all information "
1180 "sections on %s\n", devname);
1181 free(super);
1182 return rv;
1183 }
1184
1185 /* Should possibly check the sections .... */
1186
1187 st->sb = super;
1188 if (st->ss == NULL) {
1189 st->ss = &super_ddf;
1190 st->minor_version = 0;
1191 st->max_devs = 512;
1192 }
1193 return 0;
1194
1195 }
1196
1197 static void free_super_ddf(struct supertype *st)
1198 {
1199 struct ddf_super *ddf = st->sb;
1200 if (ddf == NULL)
1201 return;
1202 free(ddf->phys);
1203 free(ddf->virt);
1204 free(ddf->conf);
1205 while (ddf->conflist) {
1206 struct vcl *v = ddf->conflist;
1207 ddf->conflist = v->next;
1208 if (v->block_sizes)
1209 free(v->block_sizes);
1210 if (v->other_bvds)
1211 /*
1212 v->other_bvds[0] points to beginning of buffer,
1213 see alloc_other_bvds()
1214 */
1215 free(v->other_bvds[0]);
1216 free(v);
1217 }
1218 while (ddf->dlist) {
1219 struct dl *d = ddf->dlist;
1220 ddf->dlist = d->next;
1221 if (d->fd >= 0)
1222 close(d->fd);
1223 if (d->spare)
1224 free(d->spare);
1225 free(d);
1226 }
1227 while (ddf->add_list) {
1228 struct dl *d = ddf->add_list;
1229 ddf->add_list = d->next;
1230 if (d->fd >= 0)
1231 close(d->fd);
1232 if (d->spare)
1233 free(d->spare);
1234 free(d);
1235 }
1236 free(ddf);
1237 st->sb = NULL;
1238 }
1239
1240 static struct supertype *match_metadata_desc_ddf(char *arg)
1241 {
1242 /* 'ddf' only supports containers */
1243 struct supertype *st;
1244 if (strcmp(arg, "ddf") != 0 &&
1245 strcmp(arg, "default") != 0
1246 )
1247 return NULL;
1248
1249 st = xcalloc(1, sizeof(*st));
1250 st->ss = &super_ddf;
1251 st->max_devs = 512;
1252 st->minor_version = 0;
1253 st->sb = NULL;
1254 return st;
1255 }
1256
1257 #ifndef MDASSEMBLE
1258
1259 static mapping_t ddf_state[] = {
1260 { "Optimal", 0},
1261 { "Degraded", 1},
1262 { "Deleted", 2},
1263 { "Missing", 3},
1264 { "Failed", 4},
1265 { "Partially Optimal", 5},
1266 { "-reserved-", 6},
1267 { "-reserved-", 7},
1268 { NULL, 0}
1269 };
1270
1271 static mapping_t ddf_init_state[] = {
1272 { "Not Initialised", 0},
1273 { "QuickInit in Progress", 1},
1274 { "Fully Initialised", 2},
1275 { "*UNKNOWN*", 3},
1276 { NULL, 0}
1277 };
1278 static mapping_t ddf_access[] = {
1279 { "Read/Write", 0},
1280 { "Reserved", 1},
1281 { "Read Only", 2},
1282 { "Blocked (no access)", 3},
1283 { NULL ,0}
1284 };
1285
1286 static mapping_t ddf_level[] = {
1287 { "RAID0", DDF_RAID0},
1288 { "RAID1", DDF_RAID1},
1289 { "RAID3", DDF_RAID3},
1290 { "RAID4", DDF_RAID4},
1291 { "RAID5", DDF_RAID5},
1292 { "RAID1E",DDF_RAID1E},
1293 { "JBOD", DDF_JBOD},
1294 { "CONCAT",DDF_CONCAT},
1295 { "RAID5E",DDF_RAID5E},
1296 { "RAID5EE",DDF_RAID5EE},
1297 { "RAID6", DDF_RAID6},
1298 { NULL, 0}
1299 };
1300 static mapping_t ddf_sec_level[] = {
1301 { "Striped", DDF_2STRIPED},
1302 { "Mirrored", DDF_2MIRRORED},
1303 { "Concat", DDF_2CONCAT},
1304 { "Spanned", DDF_2SPANNED},
1305 { NULL, 0}
1306 };
1307 #endif
1308
1309 static int all_ff(const char *guid)
1310 {
1311 int i;
1312 for (i = 0; i < DDF_GUID_LEN; i++)
1313 if (guid[i] != (char)0xff)
1314 return 0;
1315 return 1;
1316 }
1317
1318 static const char *guid_str(const char *guid)
1319 {
1320 static char buf[DDF_GUID_LEN*2+1];
1321 int i;
1322 char *p = buf;
1323 for (i = 0; i < DDF_GUID_LEN; i++) {
1324 unsigned char c = guid[i];
1325 if (c >= 32 && c < 127)
1326 p += sprintf(p, "%c", c);
1327 else
1328 p += sprintf(p, "%02x", c);
1329 }
1330 *p = '\0';
1331 return (const char *) buf;
1332 }
1333
1334 #ifndef MDASSEMBLE
1335 static void print_guid(char *guid, int tstamp)
1336 {
1337 /* GUIDs are part (or all) ASCII and part binary.
1338 * They tend to be space padded.
1339 * We print the GUID in HEX, then in parentheses add
1340 * any initial ASCII sequence, and a possible
1341 * time stamp from bytes 16-19
1342 */
1343 int l = DDF_GUID_LEN;
1344 int i;
1345
1346 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1347 if ((i&3)==0 && i != 0) printf(":");
1348 printf("%02X", guid[i]&255);
1349 }
1350
1351 printf("\n (");
1352 while (l && guid[l-1] == ' ')
1353 l--;
1354 for (i=0 ; i<l ; i++) {
1355 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1356 fputc(guid[i], stdout);
1357 else
1358 break;
1359 }
1360 if (tstamp) {
1361 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1362 char tbuf[100];
1363 struct tm *tm;
1364 tm = localtime(&then);
1365 strftime(tbuf, 100, " %D %T",tm);
1366 fputs(tbuf, stdout);
1367 }
1368 printf(")");
1369 }
1370
1371 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1372 {
1373 int crl = sb->conf_rec_len;
1374 struct vcl *vcl;
1375
1376 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1377 unsigned int i;
1378 struct vd_config *vc = &vcl->conf;
1379
1380 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1381 continue;
1382 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1383 continue;
1384
1385 /* Ok, we know about this VD, let's give more details */
1386 printf(" Raid Devices[%d] : %d (", n,
1387 be16_to_cpu(vc->prim_elmnt_count));
1388 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1389 int j;
1390 int cnt = be16_to_cpu(sb->phys->used_pdes);
1391 for (j=0; j<cnt; j++)
1392 if (be32_eq(vc->phys_refnum[i],
1393 sb->phys->entries[j].refnum))
1394 break;
1395 if (i) printf(" ");
1396 if (j < cnt)
1397 printf("%d", j);
1398 else
1399 printf("--");
1400 }
1401 printf(")\n");
1402 if (vc->chunk_shift != 255)
1403 printf(" Chunk Size[%d] : %d sectors\n", n,
1404 1 << vc->chunk_shift);
1405 printf(" Raid Level[%d] : %s\n", n,
1406 map_num(ddf_level, vc->prl)?:"-unknown-");
1407 if (vc->sec_elmnt_count != 1) {
1408 printf(" Secondary Position[%d] : %d of %d\n", n,
1409 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1410 printf(" Secondary Level[%d] : %s\n", n,
1411 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1412 }
1413 printf(" Device Size[%d] : %llu\n", n,
1414 be64_to_cpu(vc->blocks)/2);
1415 printf(" Array Size[%d] : %llu\n", n,
1416 be64_to_cpu(vc->array_blocks)/2);
1417 }
1418 }
1419
1420 static void examine_vds(struct ddf_super *sb)
1421 {
1422 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1423 unsigned int i;
1424 printf(" Virtual Disks : %d\n", cnt);
1425
1426 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1427 struct virtual_entry *ve = &sb->virt->entries[i];
1428 if (all_ff(ve->guid))
1429 continue;
1430 printf("\n");
1431 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1432 printf("\n");
1433 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1434 printf(" state[%d] : %s, %s%s\n", i,
1435 map_num(ddf_state, ve->state & 7),
1436 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1437 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1438 printf(" init state[%d] : %s\n", i,
1439 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1440 printf(" access[%d] : %s\n", i,
1441 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1442 printf(" Name[%d] : %.16s\n", i, ve->name);
1443 examine_vd(i, sb, ve->guid);
1444 }
1445 if (cnt) printf("\n");
1446 }
1447
1448 static void examine_pds(struct ddf_super *sb)
1449 {
1450 int cnt = be16_to_cpu(sb->phys->used_pdes);
1451 int i;
1452 struct dl *dl;
1453 printf(" Physical Disks : %d\n", cnt);
1454 printf(" Number RefNo Size Device Type/State\n");
1455
1456 for (i=0 ; i<cnt ; i++) {
1457 struct phys_disk_entry *pd = &sb->phys->entries[i];
1458 int type = be16_to_cpu(pd->type);
1459 int state = be16_to_cpu(pd->state);
1460
1461 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1462 //printf("\n");
1463 printf(" %3d %08x ", i,
1464 be32_to_cpu(pd->refnum));
1465 printf("%8lluK ",
1466 be64_to_cpu(pd->config_size)>>1);
1467 for (dl = sb->dlist; dl ; dl = dl->next) {
1468 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1469 char *dv = map_dev(dl->major, dl->minor, 0);
1470 if (dv) {
1471 printf("%-15s", dv);
1472 break;
1473 }
1474 }
1475 }
1476 if (!dl)
1477 printf("%15s","");
1478 printf(" %s%s%s%s%s",
1479 (type&2) ? "active":"",
1480 (type&4) ? "Global-Spare":"",
1481 (type&8) ? "spare" : "",
1482 (type&16)? ", foreign" : "",
1483 (type&32)? "pass-through" : "");
1484 if (state & DDF_Failed)
1485 /* This over-rides these three */
1486 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1487 printf("/%s%s%s%s%s%s%s",
1488 (state&1)? "Online": "Offline",
1489 (state&2)? ", Failed": "",
1490 (state&4)? ", Rebuilding": "",
1491 (state&8)? ", in-transition": "",
1492 (state&16)? ", SMART-errors": "",
1493 (state&32)? ", Unrecovered-Read-Errors": "",
1494 (state&64)? ", Missing" : "");
1495 printf("\n");
1496 }
1497 }
1498
1499 static void examine_super_ddf(struct supertype *st, char *homehost)
1500 {
1501 struct ddf_super *sb = st->sb;
1502
1503 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1504 printf(" Version : %.8s\n", sb->anchor.revision);
1505 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1506 printf("\n");
1507 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1508 printf("\n");
1509 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1510 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1511 DDF_HEADER_MAGIC)
1512 ?"yes" : "no");
1513 examine_vds(sb);
1514 examine_pds(sb);
1515 }
1516
1517 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1518
1519 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1520 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1521
1522 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1523 {
1524 /*
1525 * Figure out the VD number for this supertype.
1526 * Returns DDF_CONTAINER for the container itself,
1527 * and DDF_NOTFOUND on error.
1528 */
1529 struct ddf_super *ddf = st->sb;
1530 struct mdinfo *sra;
1531 char *sub, *end;
1532 unsigned int vcnum;
1533
1534 if (*st->container_devnm == '\0')
1535 return DDF_CONTAINER;
1536
1537 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1538 if (!sra || sra->array.major_version != -1 ||
1539 sra->array.minor_version != -2 ||
1540 !is_subarray(sra->text_version))
1541 return DDF_NOTFOUND;
1542
1543 sub = strchr(sra->text_version + 1, '/');
1544 if (sub != NULL)
1545 vcnum = strtoul(sub + 1, &end, 10);
1546 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1547 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1548 return DDF_NOTFOUND;
1549
1550 return vcnum;
1551 }
1552
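/*
 * Editor's note (not part of the original source): an example of what the
 * function above parses.  When st refers to the container itself
 * (container_devnm is empty) it returns DDF_CONTAINER; for a subarray,
 * text_version looks something like "/md127/1" and the digits after the
 * second '/' give vcnum 1.
 */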
1553 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1554 {
1555 /* We just write a generic DDF ARRAY entry
1556 */
1557 struct mdinfo info;
1558 char nbuf[64];
1559 getinfo_super_ddf(st, &info, NULL);
1560 fname_from_uuid(st, &info, nbuf, ':');
1561
1562 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1563 }
1564
1565 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1566 {
1567 /* We just write a generic DDF ARRAY entry
1568 */
1569 struct ddf_super *ddf = st->sb;
1570 struct mdinfo info;
1571 unsigned int i;
1572 char nbuf[64];
1573 getinfo_super_ddf(st, &info, NULL);
1574 fname_from_uuid(st, &info, nbuf, ':');
1575
1576 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1577 struct virtual_entry *ve = &ddf->virt->entries[i];
1578 struct vcl vcl;
1579 char nbuf1[64];
1580 if (all_ff(ve->guid))
1581 continue;
1582 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1583 ddf->currentconf =&vcl;
1584 vcl.vcnum = i;
1585 uuid_from_super_ddf(st, info.uuid);
1586 fname_from_uuid(st, &info, nbuf1, ':');
1587 printf("ARRAY container=%s member=%d UUID=%s\n",
1588 nbuf+5, i, nbuf1+5);
1589 }
1590 }
1591
1592 static void export_examine_super_ddf(struct supertype *st)
1593 {
1594 struct mdinfo info;
1595 char nbuf[64];
1596 getinfo_super_ddf(st, &info, NULL);
1597 fname_from_uuid(st, &info, nbuf, ':');
1598 printf("MD_METADATA=ddf\n");
1599 printf("MD_LEVEL=container\n");
1600 printf("MD_UUID=%s\n", nbuf+5);
1601 printf("MD_DEVICES=%u\n",
1602 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1603 }
1604
1605 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1606 {
1607 void *buf;
1608 unsigned long long dsize, offset;
1609 int bytes;
1610 struct ddf_header *ddf;
1611 int written = 0;
1612
1613 /* The meta consists of an anchor, a primary, and a secondary.
1614 * This all lives at the end of the device.
1615 * So it is easiest to find the earliest of primary and
1616 * secondary, and copy everything from there.
1617 *
1618 * The anchor is 512 bytes from the end. It contains primary_lba and
1619 * secondary_lba; we copy from the earlier of those.
1620 */
1621
1622 if (posix_memalign(&buf, 4096, 4096) != 0)
1623 return 1;
1624
1625 if (!get_dev_size(from, NULL, &dsize))
1626 goto err;
1627
1628 if (lseek64(from, dsize-512, 0) < 0)
1629 goto err;
1630 if (read(from, buf, 512) != 512)
1631 goto err;
1632 ddf = buf;
1633 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1634 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1635 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1636 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1637 goto err;
1638
1639 offset = dsize - 512;
1640 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1641 offset = be64_to_cpu(ddf->primary_lba) << 9;
1642 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1643 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1644
1645 bytes = dsize - offset;
1646
1647 if (lseek64(from, offset, 0) < 0 ||
1648 lseek64(to, offset, 0) < 0)
1649 goto err;
1650 while (written < bytes) {
1651 int n = bytes - written;
1652 if (n > 4096)
1653 n = 4096;
1654 if (read(from, buf, n) != n)
1655 goto err;
1656 if (write(to, buf, n) != n)
1657 goto err;
1658 written += n;
1659 }
1660 free(buf);
1661 return 0;
1662 err:
1663 free(buf);
1664 return 1;
1665 }
1666
1667 static void detail_super_ddf(struct supertype *st, char *homehost)
1668 {
1669 /* FIXME later
1670 * Could print DDF GUID
1671 * Need to find which array
1672 * If whole, briefly list all arrays
1673 * If one, give name
1674 */
1675 }
1676
1677 static const char *vendors_with_variable_volume_UUID[] = {
1678 "LSI ",
1679 };
1680
1681 static int volume_id_is_reliable(const struct ddf_super *ddf)
1682 {
1683 int n = sizeof(vendors_with_variable_volume_UUID) /
1684 sizeof(vendors_with_variable_volume_UUID[0]);
1685 int i;
1686 for (i = 0; i < n; i++)
1687 if (!memcmp(ddf->controller.guid,
1688 vendors_with_variable_volume_UUID[i], 8))
1689 return 0;
1690 return 1;
1691 }
1692
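/*
 * Editor's note (not part of the original source): the first 8 bytes of the
 * controller GUID hold the space-padded T10 vendor ID (match_home_ddf()
 * below compares them against "Linux-MD" the same way), so a controller
 * GUID whose first 8 bytes match the "LSI" vendor entry above makes
 * volume_id_is_reliable() return 0 and steers uuid_of_ddf_subarray() below
 * onto the pseudo-UUID path - the workaround this commit ("handle fake
 * RAIDs with changing subarray UUIDs") introduces.
 */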
1693 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1694 unsigned int vcnum, int uuid[4])
1695 {
1696 char buf[DDF_GUID_LEN+18], sha[20], *p;
1697 struct sha1_ctx ctx;
1698 if (volume_id_is_reliable(ddf)) {
1699 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1700 return;
1701 }
1702 /*
1703 * Some fake RAID BIOSes (in particular, LSI ones) change the
1704 * VD GUID at every boot. These GUIDs are not suitable for
1705 * identifying an array. Luckily the header GUID appears to
1706 * remain constant.
1707 * We construct a pseudo-UUID from the header GUID and those
1708 * properties of the subarray that we expect to remain constant.
1709 */
1710 memset(buf, 0, sizeof(buf));
1711 p = buf;
1712 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1713 p += DDF_GUID_LEN;
1714 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1715 p += 16;
1716 *((__u16 *) p) = vcnum;
1717 sha1_init_ctx(&ctx);
1718 sha1_process_bytes(buf, sizeof(buf), &ctx);
1719 sha1_finish_ctx(&ctx, sha);
1720 memcpy(uuid, sha, 4*4);
1721 }
1722
1723 static void brief_detail_super_ddf(struct supertype *st)
1724 {
1725 struct mdinfo info;
1726 char nbuf[64];
1727 struct ddf_super *ddf = st->sb;
1728 unsigned int vcnum = get_vd_num_of_subarray(st);
1729 if (vcnum == DDF_CONTAINER)
1730 uuid_from_super_ddf(st, info.uuid);
1731 else if (vcnum == DDF_NOTFOUND)
1732 return;
1733 else
1734 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1735 fname_from_uuid(st, &info, nbuf,':');
1736 printf(" UUID=%s", nbuf + 5);
1737 }
1738 #endif
1739
1740 static int match_home_ddf(struct supertype *st, char *homehost)
1741 {
1742 /* It matches 'this' host if the controller is a
1743 * Linux-MD controller with vendor_data matching
1744 * the hostname
1745 */
1746 struct ddf_super *ddf = st->sb;
1747 unsigned int len;
1748
1749 if (!homehost)
1750 return 0;
1751 len = strlen(homehost);
1752
1753 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1754 len < sizeof(ddf->controller.vendor_data) &&
1755 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1756 ddf->controller.vendor_data[len] == 0);
1757 }
1758
1759 #ifndef MDASSEMBLE
1760 static int find_index_in_bvd(const struct ddf_super *ddf,
1761 const struct vd_config *conf, unsigned int n,
1762 unsigned int *n_bvd)
1763 {
1764 /*
1765 * Find the index of the n-th valid physical disk in this BVD
1766 */
1767 unsigned int i, j;
1768 for (i = 0, j = 0; i < ddf->mppe &&
1769 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1770 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1771 if (n == j) {
1772 *n_bvd = i;
1773 return 1;
1774 }
1775 j++;
1776 }
1777 }
1778 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1779 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1780 return 0;
1781 }
1782
1783 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1784 unsigned int n,
1785 unsigned int *n_bvd, struct vcl **vcl)
1786 {
1787 struct vcl *v;
1788
1789 for (v = ddf->conflist; v; v = v->next) {
1790 unsigned int nsec, ibvd = 0;
1791 struct vd_config *conf;
1792 if (inst != v->vcnum)
1793 continue;
1794 conf = &v->conf;
1795 if (conf->sec_elmnt_count == 1) {
1796 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1797 *vcl = v;
1798 return conf;
1799 } else
1800 goto bad;
1801 }
1802 if (v->other_bvds == NULL) {
1803 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1804 __func__, conf->sec_elmnt_count);
1805 goto bad;
1806 }
1807 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1808 if (conf->sec_elmnt_seq != nsec) {
1809 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1810 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1811 == nsec)
1812 break;
1813 }
1814 if (ibvd == conf->sec_elmnt_count)
1815 goto bad;
1816 conf = v->other_bvds[ibvd-1];
1817 }
1818 if (!find_index_in_bvd(ddf, conf,
1819 n - nsec*conf->sec_elmnt_count, n_bvd))
1820 goto bad;
1821 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1822 , __func__, n, *n_bvd, ibvd, inst);
1823 *vcl = v;
1824 return conf;
1825 }
1826 bad:
1827 pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1828 return NULL;
1829 }
1830 #endif
1831
1832 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1833 {
1834 /* Find the entry in phys_disk which has the given refnum
1835 * and return its index
1836 */
1837 unsigned int i;
1838 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1839 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1840 return i;
1841 return -1;
1842 }
1843
1844 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1845 {
1846 char buf[20];
1847 struct sha1_ctx ctx;
1848 sha1_init_ctx(&ctx);
1849 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1850 sha1_finish_ctx(&ctx, buf);
1851 memcpy(uuid, buf, 4*4);
1852 }
1853
1854 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1855 {
1856 /* The uuid returned here is used for:
1857 * uuid to put into bitmap file (Create, Grow)
1858 * uuid for backup header when saving critical section (Grow)
1859 * comparing uuids when re-adding a device into an array
1860 * In these cases the uuid required is that of the data-array,
1861 * not the device-set.
1862 * uuid to recognise same set when adding a missing device back
1863 * to an array. This is a uuid for the device-set.
1864 *
1865 * For each of these we can make do with a truncated
1866 * or hashed uuid rather than the original, as long as
1867 * everyone agrees.
1868 * In the case of SVD we assume the BVD is of interest,
1869 * though that might not be the case if a bitmap were made for
1870 * a mirrored SVD - worry about that later.
1871 * So we need to find the VD configuration record for the
1872 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1873 * The first 16 bytes of the sha1 of these is used.
1874 */
1875 struct ddf_super *ddf = st->sb;
1876 struct vcl *vcl = ddf->currentconf;
1877
1878 if (vcl)
1879 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1880 else
1881 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1882 }
1883
1884 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1885
1886 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1887 {
1888 struct ddf_super *ddf = st->sb;
1889 int map_disks = info->array.raid_disks;
1890 __u32 *cptr;
1891
1892 if (ddf->currentconf) {
1893 getinfo_super_ddf_bvd(st, info, map);
1894 return;
1895 }
1896 memset(info, 0, sizeof(*info));
1897
1898 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1899 info->array.level = LEVEL_CONTAINER;
1900 info->array.layout = 0;
1901 info->array.md_minor = -1;
1902 cptr = (__u32 *)(ddf->anchor.guid + 16);
1903 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1904
1905 info->array.utime = 0;
1906 info->array.chunk_size = 0;
1907 info->container_enough = 1;
1908
1909 info->disk.major = 0;
1910 info->disk.minor = 0;
1911 if (ddf->dlist) {
1912 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1913 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1914
1915 info->data_offset = be64_to_cpu(ddf->phys->
1916 entries[info->disk.raid_disk].
1917 config_size);
1918 info->component_size = ddf->dlist->size - info->data_offset;
1919 } else {
1920 info->disk.number = -1;
1921 info->disk.raid_disk = -1;
1922 // info->disk.raid_disk = find refnum in the table and use index;
1923 }
1924 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1925
1926 info->recovery_start = MaxSector;
1927 info->reshape_active = 0;
1928 info->recovery_blocked = 0;
1929 info->name[0] = 0;
1930
1931 info->array.major_version = -1;
1932 info->array.minor_version = -2;
1933 strcpy(info->text_version, "ddf");
1934 info->safe_mode_delay = 0;
1935
1936 uuid_from_super_ddf(st, info->uuid);
1937
1938 if (map) {
1939 int i;
1940 for (i = 0 ; i < map_disks; i++) {
1941 if (i < info->array.raid_disks &&
1942 (be16_to_cpu(ddf->phys->entries[i].state)
1943 & DDF_Online) &&
1944 !(be16_to_cpu(ddf->phys->entries[i].state)
1945 & DDF_Failed))
1946 map[i] = 1;
1947 else
1948 map[i] = 0;
1949 }
1950 }
1951 }
1952
1953 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1954 {
1955 struct ddf_super *ddf = st->sb;
1956 struct vcl *vc = ddf->currentconf;
1957 int cd = ddf->currentdev;
1958 int n_prim;
1959 int j;
1960 struct dl *dl;
1961 int map_disks = info->array.raid_disks;
1962 __u32 *cptr;
1963 struct vd_config *conf;
1964
1965 memset(info, 0, sizeof(*info));
1966 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1967 return;
1968 info->array.md_minor = -1;
1969 cptr = (__u32 *)(vc->conf.guid + 16);
1970 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1971 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1972 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1973 info->custom_array_size = 0;
1974
1975 conf = &vc->conf;
1976 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1977 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1978 int ibvd = cd / n_prim - 1;
1979 cd %= n_prim;
1980 conf = vc->other_bvds[ibvd];
1981 }
1982
1983 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1984 info->data_offset =
1985 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1986 if (vc->block_sizes)
1987 info->component_size = vc->block_sizes[cd];
1988 else
1989 info->component_size = be64_to_cpu(conf->blocks);
1990 }
1991
1992 for (dl = ddf->dlist; dl ; dl = dl->next)
1993 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1994 break;
1995
1996 info->disk.major = 0;
1997 info->disk.minor = 0;
1998 info->disk.state = 0;
1999 if (dl) {
2000 info->disk.major = dl->major;
2001 info->disk.minor = dl->minor;
2002 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2003 * be16_to_cpu(conf->prim_elmnt_count);
2004 info->disk.number = dl->pdnum;
2005 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2006 }
2007
2008 info->container_member = ddf->currentconf->vcnum;
2009
2010 info->recovery_start = MaxSector;
2011 info->resync_start = 0;
2012 info->reshape_active = 0;
2013 info->recovery_blocked = 0;
2014 if (!(ddf->virt->entries[info->container_member].state
2015 & DDF_state_inconsistent) &&
2016 (ddf->virt->entries[info->container_member].init_state
2017 & DDF_initstate_mask)
2018 == DDF_init_full)
2019 info->resync_start = MaxSector;
2020
2021 uuid_from_super_ddf(st, info->uuid);
2022
2023 info->array.major_version = -1;
2024 info->array.minor_version = -2;
2025 sprintf(info->text_version, "/%s/%d",
2026 st->container_devnm,
2027 info->container_member);
2028 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2029
2030 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
2031 info->name[16]=0;
2032 for(j=0; j<16; j++)
2033 if (info->name[j] == ' ')
2034 info->name[j] = 0;
2035
2036 if (map)
2037 for (j = 0; j < map_disks; j++) {
2038 map[j] = 0;
2039 if (j < info->array.raid_disks) {
2040 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2041 if (i >= 0 &&
2042 (be16_to_cpu(ddf->phys->entries[i].state)
2043 & DDF_Online) &&
2044 !(be16_to_cpu(ddf->phys->entries[i].state)
2045 & DDF_Failed))
2046 map[j] = 1;
2047 }
2048 }
2049 }
2050
2051 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2052 char *update,
2053 char *devname, int verbose,
2054 int uuid_set, char *homehost)
2055 {
2056 /* For 'assemble' and 'force' we need to return non-zero if any
2057 * change was made. For others, the return value is ignored.
2058 * Update options are:
2059 * force-one : This device looks a bit old but needs to be included,
2060 * update age info appropriately.
2061 * assemble: clear any 'faulty' flag to allow this device to
2062 * be assembled.
2063 * force-array: Array is degraded but being forced, mark it clean
2064 * if that will be needed to assemble it.
2065 *
2066 * newdev: not used ????
2067 * grow: Array has gained a new device - this is currently for
2068 * linear only
2069 * resync: mark as dirty so a resync will happen.
2070 * uuid: Change the uuid of the array to match what is given
2071 * homehost: update the recorded homehost
2072 * name: update the name - preserving the homehost
2073 * _reshape_progress: record new reshape_progress position.
2074 *
2075 * Following are not relevant for this version:
2076 * sparc2.2 : update from old dodgy metadata
2077 * super-minor: change the preferred_minor number
2078 * summaries: update redundant counters.
2079 */
2080 int rv = 0;
2081 // struct ddf_super *ddf = st->sb;
2082 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2083 // struct virtual_entry *ve = find_ve(ddf);
2084
2085 /* we don't need to handle "force-*" or "assemble" as
2086 * there is no need to 'trick' the kernel. When the metadata is
2087 * first updated to activate the array, all the implied modifications
2088 * will just happen.
2089 */
2090
2091 if (strcmp(update, "grow") == 0) {
2092 /* FIXME */
2093 } else if (strcmp(update, "resync") == 0) {
2094 // info->resync_checkpoint = 0;
2095 } else if (strcmp(update, "homehost") == 0) {
2096 /* homehost is stored in controller->vendor_data,
2097 * or at least it is when we are the vendor
2098 */
2099 // if (info->vendor_is_local)
2100 // strcpy(ddf->controller.vendor_data, homehost);
2101 rv = -1;
2102 } else if (strcmp(update, "name") == 0) {
2103 /* name is stored in virtual_entry->name */
2104 // memset(ve->name, ' ', 16);
2105 // strncpy(ve->name, info->name, 16);
2106 rv = -1;
2107 } else if (strcmp(update, "_reshape_progress") == 0) {
2108 /* We don't support reshape yet */
2109 } else if (strcmp(update, "assemble") == 0 ) {
2110 /* Do nothing, just succeed */
2111 rv = 0;
2112 } else
2113 rv = -1;
2114
2115 // update_all_csum(ddf);
2116
2117 return rv;
2118 }
2119
2120 static void make_header_guid(char *guid)
2121 {
2122 be32 stamp;
2123 /* Create a DDF Header of Virtual Disk GUID */
2124
2125 /* 24 bytes of fiction required.
2126 * first 8 are a 'vendor-id' - "Linux-MD"
2127 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2128 * Remaining 8 are a timestamp plus a random number
2129 */
2130 memcpy(guid, T10, sizeof(T10));
2131 stamp = cpu_to_be32(0xdeadbeef);
2132 memcpy(guid+8, &stamp, 4);
2133 stamp = cpu_to_be32(0);
2134 memcpy(guid+12, &stamp, 4);
2135 stamp = cpu_to_be32(time(0) - DECADE);
2136 memcpy(guid+16, &stamp, 4);
2137 stamp._v32 = random32();
2138 memcpy(guid+20, &stamp, 4);
2139 }
2140
2141 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2142 {
2143 unsigned int i;
2144 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2145 if (all_ff(ddf->virt->entries[i].guid))
2146 return i;
2147 }
2148 return DDF_NOTFOUND;
2149 }
2150
2151 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2152 const char *name)
2153 {
2154 unsigned int i;
2155 if (name == NULL)
2156 return DDF_NOTFOUND;
2157 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2158 if (all_ff(ddf->virt->entries[i].guid))
2159 continue;
2160 if (!strncmp(name, ddf->virt->entries[i].name,
2161 sizeof(ddf->virt->entries[i].name)))
2162 return i;
2163 }
2164 return DDF_NOTFOUND;
2165 }
2166
2167 #ifndef MDASSEMBLE
2168 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2169 const char *guid)
2170 {
2171 unsigned int i;
2172 if (guid == NULL || all_ff(guid))
2173 return DDF_NOTFOUND;
2174 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2175 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2176 return i;
2177 return DDF_NOTFOUND;
2178 }
2179 #endif
2180
2181 static int init_super_ddf_bvd(struct supertype *st,
2182 mdu_array_info_t *info,
2183 unsigned long long size,
2184 char *name, char *homehost,
2185 int *uuid, unsigned long long data_offset);
2186
2187 static int init_super_ddf(struct supertype *st,
2188 mdu_array_info_t *info,
2189 unsigned long long size, char *name, char *homehost,
2190 int *uuid, unsigned long long data_offset)
2191 {
2192 /* This is primarily called by Create when creating a new array.
2193 * We will then get add_to_super called for each component, and then
2194 * write_init_super called to write it out to each device.
2195 * For DDF, Create can create on fresh devices or on a pre-existing
2196 * array.
2197 * To create on a pre-existing array a different method will be called.
2198 * This one is just for fresh drives.
2199 *
2200 * We need to create the entire 'ddf' structure which includes:
2201 * DDF headers - these are easy.
2202 * Controller data - a Sector describing this controller .. not that
2203 * this is a controller exactly.
2204 * Physical Disk Record - one entry per device, so
2205 * leave plenty of space.
2206 * Virtual Disk Records - again, just leave plenty of space.
2207 * This just lists VDs, doesn't give details
2208 * Config records - describes the VDs that use this disk
2209 * DiskData - describes 'this' device.
2210 * BadBlockManagement - empty
2211 * Diag Space - empty
2212 * Vendor Logs - Could we put bitmaps here?
2213 *
2214 */
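/* As a rough sketch of what the code below lays out (offsets relative to
 * the primary/secondary header, in 512-byte sectors):
 *
 *	sector 0	DDF header
 *	sector 1	controller data    (1 sector)
 *	sector 2	phys disk records  (pdsize/512 sectors)
 *	...		virt disk records  (vdsize/512 sectors)
 *	...		config records     (conf_rec_len * (max_part+1) sectors)
 *	...		disk data          (1 sector)
 *
 * The primary/secondary/workspace LBAs themselves are left as ~0 here and
 * are only filled in per device in _write_super_to_disk().
 */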
2215 struct ddf_super *ddf;
2216 char hostname[17];
2217 int hostlen;
2218 int max_phys_disks, max_virt_disks;
2219 unsigned long long sector;
2220 int clen;
2221 int i;
2222 int pdsize, vdsize;
2223 struct phys_disk *pd;
2224 struct virtual_disk *vd;
2225
2226 if (data_offset != INVALID_SECTORS) {
2227 pr_err("data-offset not supported by DDF\n");
2228 return 0;
2229 }
2230
2231 if (st->sb)
2232 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2233 data_offset);
2234
2235 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2236 pr_err("%s could not allocate superblock\n", __func__);
2237 return 0;
2238 }
2239 memset(ddf, 0, sizeof(*ddf));
2240 ddf->dlist = NULL; /* no physical disks yet */
2241 ddf->conflist = NULL; /* No virtual disks yet */
2242 st->sb = ddf;
2243
2244 if (info == NULL) {
2245 /* zeroing superblock */
2246 return 0;
2247 }
2248
2249 /* At least 32MB *must* be reserved for the ddf. So let's just
2250 * start 32MB from the end, and put the primary header there.
2251 * Don't do secondary for now.
2252 * We don't know exactly where that will be yet as it could be
2253 * different on each device. So just set up the lengths.
2254 *
2255 */
2256
2257 ddf->anchor.magic = DDF_HEADER_MAGIC;
2258 make_header_guid(ddf->anchor.guid);
2259
2260 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2261 ddf->anchor.seq = cpu_to_be32(1);
2262 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2263 ddf->anchor.openflag = 0xFF;
2264 ddf->anchor.foreignflag = 0;
2265 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2266 ddf->anchor.pad0 = 0xff;
2267 memset(ddf->anchor.pad1, 0xff, 12);
2268 memset(ddf->anchor.header_ext, 0xff, 32);
2269 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2270 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2271 ddf->anchor.type = DDF_HEADER_ANCHOR;
2272 memset(ddf->anchor.pad2, 0xff, 3);
2273 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2274 /* Put this at bottom of 32M reserved.. */
2275 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2276 max_phys_disks = 1023; /* Should be enough */
2277 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2278 max_virt_disks = 255;
2279 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2280 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2281 ddf->max_part = 64;
2282 ddf->mppe = 256;
2283 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
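/* i.e. one sector of vd_config header plus room for ddf->mppe 4-byte
 * phys_refnums and 8-byte LBA offsets: with mppe == 256 that is
 * 1 + ROUND_UP(256 * 12, 512)/512 = 1 + 6 = 7 sectors per config record.
 */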
2284 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2285 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2286 memset(ddf->anchor.pad3, 0xff, 54);
2287 /* controller section is one sector long immediately
2288 * after the ddf header */
2289 sector = 1;
2290 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2291 ddf->anchor.controller_section_length = cpu_to_be32(1);
2292 sector += 1;
2293
2294 /* phys is 8 sectors after that */
2295 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2296 sizeof(struct phys_disk_entry)*max_phys_disks,
2297 512);
2298 switch(pdsize/512) {
2299 case 2: case 8: case 32: case 128: case 512: break;
2300 default: abort();
2301 }
2302 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2303 ddf->anchor.phys_section_length =
2304 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2305 sector += pdsize/512;
2306
2307 /* virt is another 32 sectors */
2308 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2309 sizeof(struct virtual_entry) * max_virt_disks,
2310 512);
2311 switch(vdsize/512) {
2312 case 2: case 8: case 32: case 128: case 512: break;
2313 default: abort();
2314 }
2315 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2316 ddf->anchor.virt_section_length =
2317 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2318 sector += vdsize/512;
2319
2320 clen = ddf->conf_rec_len * (ddf->max_part+1);
2321 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2322 ddf->anchor.config_section_length = cpu_to_be32(clen);
2323 sector += clen;
2324
2325 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2326 ddf->anchor.data_section_length = cpu_to_be32(1);
2327 sector += 1;
2328
2329 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2330 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2331 ddf->anchor.diag_space_length = cpu_to_be32(0);
2332 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2333 ddf->anchor.vendor_length = cpu_to_be32(0);
2334 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2335
2336 memset(ddf->anchor.pad4, 0xff, 256);
2337
2338 memcpy(&ddf->primary, &ddf->anchor, 512);
2339 memcpy(&ddf->secondary, &ddf->anchor, 512);
2340
2341 ddf->primary.openflag = 1; /* I guess.. */
2342 ddf->primary.type = DDF_HEADER_PRIMARY;
2343
2344 ddf->secondary.openflag = 1; /* I guess.. */
2345 ddf->secondary.type = DDF_HEADER_SECONDARY;
2346
2347 ddf->active = &ddf->primary;
2348
2349 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2350
2351 /* 24 more bytes of fiction required.
2352 * first 8 are a 'vendor-id' - "Linux-MD"
2353 * Remaining 16 are serial number.... maybe a hostname would do?
2354 */
2355 memcpy(ddf->controller.guid, T10, sizeof(T10));
2356 gethostname(hostname, sizeof(hostname));
2357 hostname[sizeof(hostname) - 1] = 0;
2358 hostlen = strlen(hostname);
2359 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2360 for (i = strlen(T10) ; i+hostlen < 24; i++)
2361 ddf->controller.guid[i] = ' ';
2362
2363 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2364 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2365 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2366 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2367 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2368 memset(ddf->controller.pad, 0xff, 8);
2369 memset(ddf->controller.vendor_data, 0xff, 448);
2370 if (homehost && strlen(homehost) < 440)
2371 strcpy((char*)ddf->controller.vendor_data, homehost);
2372
2373 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2374 pr_err("%s could not allocate pd\n", __func__);
2375 return 0;
2376 }
2377 ddf->phys = pd;
2378 ddf->pdsize = pdsize;
2379
2380 memset(pd, 0xff, pdsize);
2381 memset(pd, 0, sizeof(*pd));
2382 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2383 pd->used_pdes = cpu_to_be16(0);
2384 pd->max_pdes = cpu_to_be16(max_phys_disks);
2385 memset(pd->pad, 0xff, 52);
2386 for (i = 0; i < max_phys_disks; i++)
2387 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2388
2389 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2390 pr_err("%s could not allocate vd\n", __func__);
2391 return 0;
2392 }
2393 ddf->virt = vd;
2394 ddf->vdsize = vdsize;
2395 memset(vd, 0, vdsize);
2396 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2397 vd->populated_vdes = cpu_to_be16(0);
2398 vd->max_vdes = cpu_to_be16(max_virt_disks);
2399 memset(vd->pad, 0xff, 52);
2400
2401 for (i=0; i<max_virt_disks; i++)
2402 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2403
2404 st->sb = ddf;
2405 ddf_set_updates_pending(ddf);
2406 return 1;
2407 }
2408
2409 static int chunk_to_shift(int chunksize)
2410 {
2411 return ffs(chunksize/512)-1;
2412 }
2413
2414 #ifndef MDASSEMBLE
2415 struct extent {
2416 unsigned long long start, size;
2417 };
2418 static int cmp_extent(const void *av, const void *bv)
2419 {
2420 const struct extent *a = av;
2421 const struct extent *b = bv;
2422 if (a->start < b->start)
2423 return -1;
2424 if (a->start > b->start)
2425 return 1;
2426 return 0;
2427 }
2428
2429 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2430 {
2431 /* find a list of used extents on the given physical device
2432 * (dl) of the given ddf.
2433 * Return a malloced array of 'struct extent'.
2434
2435 * FIXME ignore DDF_Legacy devices?
2436
2437 */
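/* The returned array is sorted by start and terminated by a sentinel entry
 * at the device's config_size with size == 0, which is how the callers'
 * "while (e[i-1].size)" loops know when to stop.
 */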
2438 struct extent *rv;
2439 int n = 0;
2440 unsigned int i;
2441 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2442
2443 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2444 return NULL;
2445
2446 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2447
2448 for (i = 0; i < ddf->max_part; i++) {
2449 const struct vd_config *bvd;
2450 unsigned int ibvd;
2451 struct vcl *v = dl->vlist[i];
2452 if (v == NULL ||
2453 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2454 &bvd, &ibvd) == DDF_NOTFOUND)
2455 continue;
2456 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2457 rv[n].size = be64_to_cpu(bvd->blocks);
2458 n++;
2459 }
2460 qsort(rv, n, sizeof(*rv), cmp_extent);
2461
2462 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2463 rv[n].size = 0;
2464 return rv;
2465 }
2466 #endif
2467
2468 static int init_super_ddf_bvd(struct supertype *st,
2469 mdu_array_info_t *info,
2470 unsigned long long size,
2471 char *name, char *homehost,
2472 int *uuid, unsigned long long data_offset)
2473 {
2474 /* We are creating a BVD inside a pre-existing container.
2475 * so st->sb is already set.
2476 * We need to create a new vd_config and a new virtual_entry
2477 */
2478 struct ddf_super *ddf = st->sb;
2479 unsigned int venum, i;
2480 struct virtual_entry *ve;
2481 struct vcl *vcl;
2482 struct vd_config *vc;
2483
2484 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2485 pr_err("This ddf already has an array called %s\n", name);
2486 return 0;
2487 }
2488 venum = find_unused_vde(ddf);
2489 if (venum == DDF_NOTFOUND) {
2490 pr_err("Cannot find spare slot for virtual disk\n");
2491 return 0;
2492 }
2493 ve = &ddf->virt->entries[venum];
2494
2495 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2496 * timestamp, random number
2497 */
2498 make_header_guid(ve->guid);
2499 ve->unit = cpu_to_be16(info->md_minor);
2500 ve->pad0 = 0xFFFF;
2501 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2502 DDF_GUID_LEN);
2503 ve->type = cpu_to_be16(0);
2504 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2505 if (info->state & 1) /* clean */
2506 ve->init_state = DDF_init_full;
2507 else
2508 ve->init_state = DDF_init_not;
2509
2510 memset(ve->pad1, 0xff, 14);
2511 memset(ve->name, ' ', 16);
2512 if (name)
2513 strncpy(ve->name, name, 16);
2514 ddf->virt->populated_vdes =
2515 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2516
2517 /* Now create a new vd_config */
2518 if (posix_memalign((void**)&vcl, 512,
2519 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2520 pr_err("%s could not allocate vd_config\n", __func__);
2521 return 0;
2522 }
2523 vcl->vcnum = venum;
2524 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2525 vc = &vcl->conf;
2526
2527 vc->magic = DDF_VD_CONF_MAGIC;
2528 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2529 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2530 vc->seqnum = cpu_to_be32(1);
2531 memset(vc->pad0, 0xff, 24);
2532 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2533 if (layout_md2ddf(info, vc) == -1 ||
2534 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2535 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2536 __func__, info->level, info->layout, info->raid_disks);
2537 free(vcl);
2538 return 0;
2539 }
2540 vc->sec_elmnt_seq = 0;
2541 if (alloc_other_bvds(ddf, vcl) != 0) {
2542 pr_err("%s could not allocate other bvds\n",
2543 __func__);
2544 free(vcl);
2545 return 0;
2546 }
2547 vc->blocks = cpu_to_be64(info->size * 2);
2548 vc->array_blocks = cpu_to_be64(
2549 calc_array_size(info->level, info->raid_disks, info->layout,
2550 info->chunk_size, info->size*2));
2551 memset(vc->pad1, 0xff, 8);
2552 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2553 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2554 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2555 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2556 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2557 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2558 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2559 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2560 memset(vc->cache_pol, 0, 8);
2561 vc->bg_rate = 0x80;
2562 memset(vc->pad2, 0xff, 3);
2563 memset(vc->pad3, 0xff, 52);
2564 memset(vc->pad4, 0xff, 192);
2565 memset(vc->v0, 0xff, 32);
2566 memset(vc->v1, 0xff, 32);
2567 memset(vc->v2, 0xff, 16);
2568 memset(vc->v3, 0xff, 16);
2569 memset(vc->vendor, 0xff, 32);
2570
2571 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2572 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2573
2574 for (i = 1; i < vc->sec_elmnt_count; i++) {
2575 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2576 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2577 }
2578
2579 vcl->next = ddf->conflist;
2580 ddf->conflist = vcl;
2581 ddf->currentconf = vcl;
2582 ddf_set_updates_pending(ddf);
2583 return 1;
2584 }
2585
2586
2587 #ifndef MDASSEMBLE
2588 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2589
2590 static void add_to_super_ddf_bvd(struct supertype *st,
2591 mdu_disk_info_t *dk, int fd, char *devname)
2592 {
2593 /* fd and devname identify a device within the ddf container (st).
2594 * dk identifies a location in the new BVD.
2595 * We need to find suitable free space in that device and update
2596 * the phys_refnum and lba_offset for the newly created vd_config.
2597 * We might also want to update the type in the phys_disk
2598 * section.
2599 *
2600 * Alternately: fd == -1 and we have already chosen which device to
2601 * use and recorded it in dl->raiddisk.
2602 */
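/* The space search below is a simple first fit: walk the gaps between the
 * used extents returned by get_extents() and take the first one that is at
 * least 'blocks' sectors long; if none is big enough the disk is skipped.
 */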
2603 struct dl *dl;
2604 struct ddf_super *ddf = st->sb;
2605 struct vd_config *vc;
2606 unsigned int i;
2607 unsigned long long blocks, pos, esize;
2608 struct extent *ex;
2609 unsigned int raid_disk = dk->raid_disk;
2610
2611 if (fd == -1) {
2612 for (dl = ddf->dlist; dl ; dl = dl->next)
2613 if (dl->raiddisk == dk->raid_disk)
2614 break;
2615 } else {
2616 for (dl = ddf->dlist; dl ; dl = dl->next)
2617 if (dl->major == dk->major &&
2618 dl->minor == dk->minor)
2619 break;
2620 }
2621 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2622 return;
2623
2624 vc = &ddf->currentconf->conf;
2625 if (vc->sec_elmnt_count > 1) {
2626 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2627 if (raid_disk >= n)
2628 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2629 raid_disk %= n;
2630 }
2631
2632 ex = get_extents(ddf, dl);
2633 if (!ex)
2634 return;
2635
2636 i = 0; pos = 0;
2637 blocks = be64_to_cpu(vc->blocks);
2638 if (ddf->currentconf->block_sizes)
2639 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2640
2641 do {
2642 esize = ex[i].start - pos;
2643 if (esize >= blocks)
2644 break;
2645 pos = ex[i].start + ex[i].size;
2646 i++;
2647 } while (ex[i-1].size);
2648
2649 free(ex);
2650 if (esize < blocks)
2651 return;
2652
2653 ddf->currentdev = dk->raid_disk;
2654 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2655 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2656
2657 for (i = 0; i < ddf->max_part ; i++)
2658 if (dl->vlist[i] == NULL)
2659 break;
2660 if (i == ddf->max_part)
2661 return;
2662 dl->vlist[i] = ddf->currentconf;
2663
2664 if (fd >= 0)
2665 dl->fd = fd;
2666 if (devname)
2667 dl->devname = devname;
2668
2669 /* Check if we can mark array as optimal yet */
2670 i = ddf->currentconf->vcnum;
2671 ddf->virt->entries[i].state =
2672 (ddf->virt->entries[i].state & ~DDF_state_mask)
2673 | get_svd_state(ddf, ddf->currentconf);
2674 be16_clear(ddf->phys->entries[dl->pdnum].type,
2675 cpu_to_be16(DDF_Global_Spare));
2676 be16_set(ddf->phys->entries[dl->pdnum].type,
2677 cpu_to_be16(DDF_Active_in_VD));
2678 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2679 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2680 ddf->currentconf->vcnum, guid_str(vc->guid),
2681 dk->raid_disk);
2682 ddf_set_updates_pending(ddf);
2683 }
2684
2685 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2686 {
2687 unsigned int i;
2688 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2689 if (all_ff(ddf->phys->entries[i].guid))
2690 return i;
2691 }
2692 return DDF_NOTFOUND;
2693 }
2694
2695 /* add a device to a container, either while creating it or while
2696 * expanding a pre-existing container
2697 */
2698 static int add_to_super_ddf(struct supertype *st,
2699 mdu_disk_info_t *dk, int fd, char *devname,
2700 unsigned long long data_offset)
2701 {
2702 struct ddf_super *ddf = st->sb;
2703 struct dl *dd;
2704 time_t now;
2705 struct tm *tm;
2706 unsigned long long size;
2707 struct phys_disk_entry *pde;
2708 unsigned int n, i;
2709 struct stat stb;
2710 __u32 *tptr;
2711
2712 if (ddf->currentconf) {
2713 add_to_super_ddf_bvd(st, dk, fd, devname);
2714 return 0;
2715 }
2716
2717 /* This is device numbered dk->number. We need to create
2718 * a phys_disk entry and a more detailed disk_data entry.
2719 */
2720 fstat(fd, &stb);
2721 n = find_unused_pde(ddf);
2722 if (n == DDF_NOTFOUND) {
2723 pr_err("%s: No free slot in array, cannot add disk\n",
2724 __func__);
2725 return 1;
2726 }
2727 pde = &ddf->phys->entries[n];
2728 get_dev_size(fd, NULL, &size);
2729 if (size <= 32*1024*1024) {
2730 pr_err("%s: device size must be at least 32MB\n",
2731 __func__);
2732 return 1;
2733 }
2734 size >>= 9;
2735
2736 if (posix_memalign((void**)&dd, 512,
2737 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2738 pr_err("%s could allocate buffer for new disk, aborting\n",
2739 __func__);
2740 return 1;
2741 }
2742 dd->major = major(stb.st_rdev);
2743 dd->minor = minor(stb.st_rdev);
2744 dd->devname = devname;
2745 dd->fd = fd;
2746 dd->spare = NULL;
2747
2748 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2749 now = time(0);
2750 tm = localtime(&now);
2751 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2752 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2753 tptr = (__u32 *)(dd->disk.guid + 16);
2754 *tptr++ = random32();
2755 *tptr = random32();
2756
2757 do {
2758 /* Cannot be bothered finding a CRC of some irrelevant details*/
2759 dd->disk.refnum._v32 = random32();
2760 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2761 i > 0; i--)
2762 if (be32_eq(ddf->phys->entries[i-1].refnum,
2763 dd->disk.refnum))
2764 break;
2765 } while (i > 0);
2766
2767 dd->disk.forced_ref = 1;
2768 dd->disk.forced_guid = 1;
2769 memset(dd->disk.vendor, ' ', 32);
2770 memcpy(dd->disk.vendor, "Linux", 5);
2771 memset(dd->disk.pad, 0xff, 442);
2772 for (i = 0; i < ddf->max_part ; i++)
2773 dd->vlist[i] = NULL;
2774
2775 dd->pdnum = n;
2776
2777 if (st->update_tail) {
2778 int len = (sizeof(struct phys_disk) +
2779 sizeof(struct phys_disk_entry));
2780 struct phys_disk *pd;
2781
2782 pd = xmalloc(len);
2783 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2784 pd->used_pdes = cpu_to_be16(n);
2785 pde = &pd->entries[0];
2786 dd->mdupdate = pd;
2787 } else
2788 ddf->phys->used_pdes = cpu_to_be16(
2789 1 + be16_to_cpu(ddf->phys->used_pdes));
2790
2791 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2792 pde->refnum = dd->disk.refnum;
2793 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2794 pde->state = cpu_to_be16(DDF_Online);
2795 dd->size = size;
2796 /*
2797 * If there is already a device in dlist, try to reserve the same
2798 * amount of workspace. Otherwise, use 32MB.
2799 * We checked disk size above already.
2800 */
2801 #define __calc_lba(new, old, lba, mb) do { \
2802 unsigned long long dif; \
2803 if ((old) != NULL) \
2804 dif = (old)->size - be64_to_cpu((old)->lba); \
2805 else \
2806 dif = (new)->size; \
2807 if ((new)->size > dif) \
2808 (new)->lba = cpu_to_be64((new)->size - dif); \
2809 else \
2810 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2811 } while (0)
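/* For the first disk (ddf->dlist == NULL) 'dif' equals the new disk's size,
 * so the else branch places the LBA 'mb' MiB before the end of the device
 * (mb*1024*2 sectors of 512 bytes): workspace at size - 32MiB, primary
 * header at size - 16MiB, secondary at size - 32MiB.  For later disks the
 * same distance-from-end as the existing first disk is reused when it fits.
 */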
2812 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2813 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2814 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2815 pde->config_size = dd->workspace_lba;
2816
2817 sprintf(pde->path, "%17.17s", "Information: nil");
2818 memset(pde->pad, 0xff, 6);
2819
2820 if (st->update_tail) {
2821 dd->next = ddf->add_list;
2822 ddf->add_list = dd;
2823 } else {
2824 dd->next = ddf->dlist;
2825 ddf->dlist = dd;
2826 ddf_set_updates_pending(ddf);
2827 }
2828
2829 return 0;
2830 }
2831
2832 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2833 {
2834 struct ddf_super *ddf = st->sb;
2835 struct dl *dl;
2836
2837 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2838 * disappeared from the container.
2839 * We need to arrange that it disappears from the metadata and
2840 * internal data structures too.
2841 * Most of the work is done by ddf_process_update, which edits
2842 * the metadata, closes the file handle, and attaches the memory
2843 * so that free_updates will free it.
2844 */
2845 for (dl = ddf->dlist; dl ; dl = dl->next)
2846 if (dl->major == dk->major &&
2847 dl->minor == dk->minor)
2848 break;
2849 if (!dl)
2850 return -1;
2851
2852 if (st->update_tail) {
2853 int len = (sizeof(struct phys_disk) +
2854 sizeof(struct phys_disk_entry));
2855 struct phys_disk *pd;
2856
2857 pd = xmalloc(len);
2858 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2859 pd->used_pdes = cpu_to_be16(dl->pdnum);
2860 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2861 append_metadata_update(st, pd, len);
2862 }
2863 return 0;
2864 }
2865 #endif
2866
2867 /*
2868 * This is the write_init_super method for a ddf container. It is
2869 * called when creating a container or adding another device to a
2870 * container.
2871 */
2872 #define NULL_CONF_SZ 4096
2873
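/* Write one copy (primary or secondary) of the DDF structure to disk 'd':
 * seek to that header's LBA, write the header with openflag set, then the
 * controller data, phys disk records, virt disk records, max_part+1 config
 * records (unused slots are 0xff-filled, the final slot holding the
 * spare-assignment record from d->spare, if any) and the 512-byte
 * disk-data block; finally rewrite the header with openflag cleared to
 * mark the update complete.
 */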
2874 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2875 {
2876 unsigned long long sector;
2877 struct ddf_header *header;
2878 int fd, i, n_config, conf_size, buf_size;
2879 int ret = 0;
2880 char *conf;
2881
2882 fd = d->fd;
2883
2884 switch (type) {
2885 case DDF_HEADER_PRIMARY:
2886 header = &ddf->primary;
2887 sector = be64_to_cpu(header->primary_lba);
2888 break;
2889 case DDF_HEADER_SECONDARY:
2890 header = &ddf->secondary;
2891 sector = be64_to_cpu(header->secondary_lba);
2892 break;
2893 default:
2894 return 0;
2895 }
2896
2897 header->type = type;
2898 header->openflag = 1;
2899 header->crc = calc_crc(header, 512);
2900
2901 lseek64(fd, sector<<9, 0);
2902 if (write(fd, header, 512) < 0)
2903 goto out;
2904
2905 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2906 if (write(fd, &ddf->controller, 512) < 0)
2907 goto out;
2908
2909 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2910 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2911 goto out;
2912 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2913 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2914 goto out;
2915
2916 /* Now write lots of config records. */
2917 n_config = ddf->max_part;
2918 conf_size = ddf->conf_rec_len * 512;
2919 conf = ddf->conf;
2920 buf_size = conf_size * (n_config + 1);
2921 if (!conf) {
2922 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2923 goto out;
2924 ddf->conf = conf;
2925 }
2926 for (i = 0 ; i <= n_config ; i++) {
2927 struct vcl *c;
2928 struct vd_config *vdc = NULL;
2929 if (i == n_config) {
2930 c = (struct vcl *)d->spare;
2931 if (c)
2932 vdc = &c->conf;
2933 } else {
2934 unsigned int dummy;
2935 c = d->vlist[i];
2936 if (c)
2937 get_pd_index_from_refnum(
2938 c, d->disk.refnum,
2939 ddf->mppe,
2940 (const struct vd_config **)&vdc,
2941 &dummy);
2942 }
2943 if (c) {
2944 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2945 i, be32_to_cpu(d->disk.refnum),
2946 guid_str(vdc->guid),
2947 vdc->sec_elmnt_seq);
2948 vdc->seqnum = header->seq;
2949 vdc->crc = calc_crc(vdc, conf_size);
2950 memcpy(conf + i*conf_size, vdc, conf_size);
2951 } else
2952 memset(conf + i*conf_size, 0xff, conf_size);
2953 }
2954 if (write(fd, conf, buf_size) != buf_size)
2955 goto out;
2956
2957 d->disk.crc = calc_crc(&d->disk, 512);
2958 if (write(fd, &d->disk, 512) < 0)
2959 goto out;
2960
2961 ret = 1;
2962 out:
2963 header->openflag = 0;
2964 header->crc = calc_crc(header, 512);
2965
2966 lseek64(fd, sector<<9, 0);
2967 if (write(fd, header, 512) < 0)
2968 ret = 0;
2969
2970 return ret;
2971 }
2972
2973 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2974 {
2975 unsigned long long size;
2976 int fd = d->fd;
2977 if (fd < 0)
2978 return 0;
2979
2980 /* We need to fill in the primary, (secondary) and workspace
2981 * lba's in the headers, set their checksums,
2982 * Also checksum phys, virt....
2983 *
2984 * Then write everything out, finally the anchor is written.
2985 */
2986 get_dev_size(fd, NULL, &size);
2987 size /= 512;
2988 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2989 ddf->anchor.workspace_lba = d->workspace_lba;
2990 else
2991 ddf->anchor.workspace_lba =
2992 cpu_to_be64(size - 32*1024*2);
2993 if (be64_to_cpu(d->primary_lba) != 0ULL)
2994 ddf->anchor.primary_lba = d->primary_lba;
2995 else
2996 ddf->anchor.primary_lba =
2997 cpu_to_be64(size - 16*1024*2);
2998 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2999 ddf->anchor.secondary_lba = d->secondary_lba;
3000 else
3001 ddf->anchor.secondary_lba =
3002 cpu_to_be64(size - 32*1024*2);
3003 ddf->anchor.seq = ddf->active->seq;
3004 memcpy(&ddf->primary, &ddf->anchor, 512);
3005 memcpy(&ddf->secondary, &ddf->anchor, 512);
3006
3007 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3008 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3009 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3010
3011 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3012 return 0;
3013
3014 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3015 return 0;
3016
3017 lseek64(fd, (size-1)*512, SEEK_SET);
3018 if (write(fd, &ddf->anchor, 512) < 0)
3019 return 0;
3020
3021 return 1;
3022 }
3023
3024 #ifndef MDASSEMBLE
3025 static int __write_init_super_ddf(struct supertype *st)
3026 {
3027 struct ddf_super *ddf = st->sb;
3028 struct dl *d;
3029 int attempts = 0;
3030 int successes = 0;
3031
3032 pr_state(ddf, __func__);
3033
3034 /* try to write updated metadata,
3035 * if we catch a failure move on to the next disk
3036 */
3037 for (d = ddf->dlist; d; d=d->next) {
3038 attempts++;
3039 successes += _write_super_to_disk(ddf, d);
3040 }
3041
3042 return attempts != successes;
3043 }
3044
3045 static int write_init_super_ddf(struct supertype *st)
3046 {
3047 struct ddf_super *ddf = st->sb;
3048 struct vcl *currentconf = ddf->currentconf;
3049
3050 /* we are done with currentconf; reset it so that st refers to the container */
3051 ddf->currentconf = NULL;
3052
3053 if (st->update_tail) {
3054 /* queue the virtual_disk and vd_config as metadata updates */
3055 struct virtual_disk *vd;
3056 struct vd_config *vc;
3057 int len, tlen;
3058 unsigned int i;
3059
3060 if (!currentconf) {
3061 int len = (sizeof(struct phys_disk) +
3062 sizeof(struct phys_disk_entry));
3063
3064 /* adding a disk to the container. */
3065 if (!ddf->add_list)
3066 return 0;
3067
3068 append_metadata_update(st, ddf->add_list->mdupdate, len);
3069 ddf->add_list->mdupdate = NULL;
3070 return 0;
3071 }
3072
3073 /* Newly created VD */
3074
3075 /* First the virtual disk. We have a slightly fake header */
3076 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3077 vd = xmalloc(len);
3078 *vd = *ddf->virt;
3079 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3080 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3081 append_metadata_update(st, vd, len);
3082
3083 /* Then the vd_config */
3084 len = ddf->conf_rec_len * 512;
3085 tlen = len * currentconf->conf.sec_elmnt_count;
3086 vc = xmalloc(tlen);
3087 memcpy(vc, &currentconf->conf, len);
3088 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3089 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3090 len);
3091 append_metadata_update(st, vc, tlen);
3092
3093 /* FIXME I need to close the fds! */
3094 return 0;
3095 } else {
3096 struct dl *d;
3097 if (!currentconf)
3098 for (d = ddf->dlist; d; d=d->next)
3099 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3100 return __write_init_super_ddf(st);
3101 }
3102 }
3103
3104 #endif
3105
3106 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3107 unsigned long long data_offset)
3108 {
3109 /* We must reserve the last 32Meg */
3110 if (devsize <= 32*1024*2)
3111 return 0;
3112 return devsize - 32*1024*2;
3113 }
3114
3115 #ifndef MDASSEMBLE
3116
3117 static int reserve_space(struct supertype *st, int raiddisks,
3118 unsigned long long size, int chunk,
3119 unsigned long long *freesize)
3120 {
3121 /* Find 'raiddisks' spare extents at least 'size' big (but
3122 * only caring about multiples of 'chunk') and remember
3123 * them.
3124 * If they cannot be found, fail.
3125 */
3126 struct dl *dl;
3127 struct ddf_super *ddf = st->sb;
3128 int cnt = 0;
3129
3130 for (dl = ddf->dlist; dl ; dl=dl->next) {
3131 dl->raiddisk = -1;
3132 dl->esize = 0;
3133 }
3134 /* Now find largest extent on each device */
3135 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3136 struct extent *e = get_extents(ddf, dl);
3137 unsigned long long pos = 0;
3138 int i = 0;
3139 int found = 0;
3140 unsigned long long minsize = size;
3141
3142 if (size == 0)
3143 minsize = chunk;
3144
3145 if (!e)
3146 continue;
3147 do {
3148 unsigned long long esize;
3149 esize = e[i].start - pos;
3150 if (esize >= minsize) {
3151 found = 1;
3152 minsize = esize;
3153 }
3154 pos = e[i].start + e[i].size;
3155 i++;
3156 } while (e[i-1].size);
3157 if (found) {
3158 cnt++;
3159 dl->esize = minsize;
3160 }
3161 free(e);
3162 }
3163 if (cnt < raiddisks) {
3164 pr_err("not enough devices with space to create array.\n");
3165 return 0; /* Not enough free extents large enough */
3166 }
3167 if (size == 0) {
3168 /* choose the largest size of which there are at least 'raiddisks' */
3169 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3170 struct dl *dl2;
3171 if (dl->esize <= size)
3172 continue;
3173 /* This is bigger than 'size', see if there are enough */
3174 cnt = 0;
3175 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3176 if (dl2->esize >= dl->esize)
3177 cnt++;
3178 if (cnt >= raiddisks)
3179 size = dl->esize;
3180 }
3181 if (chunk) {
3182 size = size / chunk;
3183 size *= chunk;
3184 }
3185 *freesize = size;
3186 if (size < 32) {
3187 pr_err("not enough spare devices to create array.\n");
3188 return 0;
3189 }
3190 }
3191 /* We have a 'size' of which there are enough spaces.
3192 * We simply do a first-fit */
3193 cnt = 0;
3194 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3195 if (dl->esize < size)
3196 continue;
3197
3198 dl->raiddisk = cnt;
3199 cnt++;
3200 }
3201 return 1;
3202 }
3203
3204 static int
3205 validate_geometry_ddf_container(struct supertype *st,
3206 int level, int layout, int raiddisks,
3207 int chunk, unsigned long long size,
3208 unsigned long long data_offset,
3209 char *dev, unsigned long long *freesize,
3210 int verbose);
3211
3212 static int validate_geometry_ddf_bvd(struct supertype *st,
3213 int level, int layout, int raiddisks,
3214 int *chunk, unsigned long long size,
3215 unsigned long long data_offset,
3216 char *dev, unsigned long long *freesize,
3217 int verbose);
3218
3219 static int validate_geometry_ddf(struct supertype *st,
3220 int level, int layout, int raiddisks,
3221 int *chunk, unsigned long long size,
3222 unsigned long long data_offset,
3223 char *dev, unsigned long long *freesize,
3224 int verbose)
3225 {
3226 int fd;
3227 struct mdinfo *sra;
3228 int cfd;
3229
3230 /* ddf potentially supports lots of things, but it depends on
3231 * what devices are offered (and maybe kernel version?)
3232 * If given unused devices, we will make a container.
3233 * If given devices in a container, we will make a BVD.
3234 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3235 */
3236
3237 if (*chunk == UnSet)
3238 *chunk = DEFAULT_CHUNK;
3239
3240 if (level == -1000000) level = LEVEL_CONTAINER;
3241 if (level == LEVEL_CONTAINER) {
3242 /* Must be a fresh device to add to a container */
3243 return validate_geometry_ddf_container(st, level, layout,
3244 raiddisks, *chunk,
3245 size, data_offset, dev,
3246 freesize,
3247 verbose);
3248 }
3249
3250 if (!dev) {
3251 mdu_array_info_t array = {
3252 .level = level, .layout = layout,
3253 .raid_disks = raiddisks
3254 };
3255 struct vd_config conf;
3256 if (layout_md2ddf(&array, &conf) == -1) {
3257 if (verbose)
3258 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3259 level, layout, raiddisks);
3260 return 0;
3261 }
3262 /* Should check layout? etc */
3263
3264 if (st->sb && freesize) {
3265 /* --create was given a container to create in.
3266 * So we need to check that there are enough
3267 * free spaces and return the amount of space.
3268 * We may as well remember which drives were
3269 * chosen so that add_to_super/getinfo_super
3270 * can return them.
3271 */
3272 return reserve_space(st, raiddisks, size, *chunk, freesize);
3273 }
3274 return 1;
3275 }
3276
3277 if (st->sb) {
3278 /* A container has already been opened, so we are
3279 * creating in there. Maybe a BVD, maybe an SVD.
3280 * Should make a distinction one day.
3281 */
3282 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3283 chunk, size, data_offset, dev,
3284 freesize,
3285 verbose);
3286 }
3287 /* This is the first device for the array.
3288 * If it is a container, we read it in and do automagic allocations,
3289 * no other devices should be given.
3290 * Otherwise it must be a member device of a container, and we
3291 * do manual allocation.
3292 * Later we should check for a BVD and make an SVD.
3293 */
3294 fd = open(dev, O_RDONLY|O_EXCL, 0);
3295 if (fd >= 0) {
3296 sra = sysfs_read(fd, NULL, GET_VERSION);
3297 close(fd);
3298 if (sra && sra->array.major_version == -1 &&
3299 strcmp(sra->text_version, "ddf") == 0) {
3300
3301 /* load super */
3302 /* find space for 'n' devices. */
3303 /* remember the devices */
3304 /* Somehow return the fact that we have enough */
3305 }
3306
3307 if (verbose)
3308 pr_err("ddf: Cannot create this array "
3309 "on device %s - a container is required.\n",
3310 dev);
3311 return 0;
3312 }
3313 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3314 if (verbose)
3315 pr_err("ddf: Cannot open %s: %s\n",
3316 dev, strerror(errno));
3317 return 0;
3318 }
3319 /* Well, it is in use by someone, maybe a 'ddf' container. */
3320 cfd = open_container(fd);
3321 if (cfd < 0) {
3322 close(fd);
3323 if (verbose)
3324 pr_err("ddf: Cannot use %s: %s\n",
3325 dev, strerror(EBUSY));
3326 return 0;
3327 }
3328 sra = sysfs_read(cfd, NULL, GET_VERSION);
3329 close(fd);
3330 if (sra && sra->array.major_version == -1 &&
3331 strcmp(sra->text_version, "ddf") == 0) {
3332 /* This is a member of a ddf container. Load the container
3333 * and try to create a bvd
3334 */
3335 struct ddf_super *ddf;
3336 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3337 st->sb = ddf;
3338 strcpy(st->container_devnm, fd2devnm(cfd));
3339 close(cfd);
3340 return validate_geometry_ddf_bvd(st, level, layout,
3341 raiddisks, chunk, size,
3342 data_offset,
3343 dev, freesize,
3344 verbose);
3345 }
3346 close(cfd);
3347 } else /* device may belong to a different container */
3348 return 0;
3349
3350 return 1;
3351 }
3352
3353 static int
3354 validate_geometry_ddf_container(struct supertype *st,
3355 int level, int layout, int raiddisks,
3356 int chunk, unsigned long long size,
3357 unsigned long long data_offset,
3358 char *dev, unsigned long long *freesize,
3359 int verbose)
3360 {
3361 int fd;
3362 unsigned long long ldsize;
3363
3364 if (level != LEVEL_CONTAINER)
3365 return 0;
3366 if (!dev)
3367 return 1;
3368
3369 fd = open(dev, O_RDONLY|O_EXCL, 0);
3370 if (fd < 0) {
3371 if (verbose)
3372 pr_err("ddf: Cannot open %s: %s\n",
3373 dev, strerror(errno));
3374 return 0;
3375 }
3376 if (!get_dev_size(fd, dev, &ldsize)) {
3377 close(fd);
3378 return 0;
3379 }
3380 close(fd);
3381
3382 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3383 if (*freesize == 0)
3384 return 0;
3385
3386 return 1;
3387 }
3388
3389 static int validate_geometry_ddf_bvd(struct supertype *st,
3390 int level, int layout, int raiddisks,
3391 int *chunk, unsigned long long size,
3392 unsigned long long data_offset,
3393 char *dev, unsigned long long *freesize,
3394 int verbose)
3395 {
3396 struct stat stb;
3397 struct ddf_super *ddf = st->sb;
3398 struct dl *dl;
3399 unsigned long long pos = 0;
3400 unsigned long long maxsize;
3401 struct extent *e;
3402 int i;
3403 /* ddf/bvd supports lots of things, but not containers */
3404 if (level == LEVEL_CONTAINER) {
3405 if (verbose)
3406 pr_err("DDF cannot create a container within an container\n");
3407 return 0;
3408 }
3409 /* We must have the container info already read in. */
3410 if (!ddf)
3411 return 0;
3412
3413 if (!dev) {
3414 /* General test: make sure there is space for
3415 * 'raiddisks' device extents of size 'size'.
3416 */
3417 unsigned long long minsize = size;
3418 int dcnt = 0;
3419 if (minsize == 0)
3420 minsize = 8;
3421 for (dl = ddf->dlist; dl ; dl = dl->next)
3422 {
3423 int found = 0;
3424 pos = 0;
3425
3426 i = 0;
3427 e = get_extents(ddf, dl);
3428 if (!e) continue;
3429 do {
3430 unsigned long long esize;
3431 esize = e[i].start - pos;
3432 if (esize >= minsize)
3433 found = 1;
3434 pos = e[i].start + e[i].size;
3435 i++;
3436 } while (e[i-1].size);
3437 if (found)
3438 dcnt++;
3439 free(e);
3440 }
3441 if (dcnt < raiddisks) {
3442 if (verbose)
3443 pr_err("ddf: Not enough devices with "
3444 "space for this array (%d < %d)\n",
3445 dcnt, raiddisks);
3446 return 0;
3447 }
3448 return 1;
3449 }
3450 /* This device must be a member of the set */
3451 if (stat(dev, &stb) < 0)
3452 return 0;
3453 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3454 return 0;
3455 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3456 if (dl->major == (int)major(stb.st_rdev) &&
3457 dl->minor == (int)minor(stb.st_rdev))
3458 break;
3459 }
3460 if (!dl) {
3461 if (verbose)
3462 pr_err("ddf: %s is not in the "
3463 "same DDF set\n",
3464 dev);
3465 return 0;
3466 }
3467 e = get_extents(ddf, dl);
3468 maxsize = 0;
3469 i = 0;
3470 if (e) do {
3471 unsigned long long esize;
3472 esize = e[i].start - pos;
3473 if (esize >= maxsize)
3474 maxsize = esize;
3475 pos = e[i].start + e[i].size;
3476 i++;
3477 } while (e[i-1].size);
3478 *freesize = maxsize;
3479 // FIXME here I am
3480
3481 return 1;
3482 }
3483
3484 static int load_super_ddf_all(struct supertype *st, int fd,
3485 void **sbp, char *devname)
3486 {
3487 struct mdinfo *sra;
3488 struct ddf_super *super;
3489 struct mdinfo *sd, *best = NULL;
3490 int bestseq = 0;
3491 int seq;
3492 char nm[20];
3493 int dfd;
3494
3495 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3496 if (!sra)
3497 return 1;
3498 if (sra->array.major_version != -1 ||
3499 sra->array.minor_version != -2 ||
3500 strcmp(sra->text_version, "ddf") != 0)
3501 return 1;
3502
3503 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3504 return 1;
3505 memset(super, 0, sizeof(*super));
3506
3507 /* first, try each device, and choose the best ddf */
3508 for (sd = sra->devs ; sd ; sd = sd->next) {
3509 int rv;
3510 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3511 dfd = dev_open(nm, O_RDONLY);
3512 if (dfd < 0)
3513 return 2;
3514 rv = load_ddf_headers(dfd, super, NULL);
3515 close(dfd);
3516 if (rv == 0) {
3517 seq = be32_to_cpu(super->active->seq);
3518 if (super->active->openflag)
3519 seq--;
3520 if (!best || seq > bestseq) {
3521 bestseq = seq;
3522 best = sd;
3523 }
3524 }
3525 }
3526 if (!best)
3527 return 1;
3528 /* OK, load this ddf */
3529 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3530 dfd = dev_open(nm, O_RDONLY);
3531 if (dfd < 0)
3532 return 1;
3533 load_ddf_headers(dfd, super, NULL);
3534 load_ddf_global(dfd, super, NULL);
3535 close(dfd);
3536 /* Now we need the device-local bits */
3537 for (sd = sra->devs ; sd ; sd = sd->next) {
3538 int rv;
3539
3540 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3541 dfd = dev_open(nm, O_RDWR);
3542 if (dfd < 0)
3543 return 2;
3544 rv = load_ddf_headers(dfd, super, NULL);
3545 if (rv == 0)
3546 rv = load_ddf_local(dfd, super, NULL, 1);
3547 if (rv)
3548 return 1;
3549 }
3550
3551 *sbp = super;
3552 if (st->ss == NULL) {
3553 st->ss = &super_ddf;
3554 st->minor_version = 0;
3555 st->max_devs = 512;
3556 }
3557 strcpy(st->container_devnm, fd2devnm(fd));
3558 return 0;
3559 }
3560
3561 static int load_container_ddf(struct supertype *st, int fd,
3562 char *devname)
3563 {
3564 return load_super_ddf_all(st, fd, &st->sb, devname);
3565 }
3566
3567 #endif /* MDASSEMBLE */
3568
3569 static int check_secondary(const struct vcl *vc)
3570 {
3571 const struct vd_config *conf = &vc->conf;
3572 int i;
3573
3574 /* The only DDF secondary RAID level md can support is
3575 * RAID 10, if the stripe sizes and Basic volume sizes
3576 * are all equal.
3577 * Other configurations could in theory be supported by exposing
3578 * the BVDs to user space and using device mapper for the secondary
3579 * mapping. So far we don't support that.
3580 */
3581
3582 __u64 sec_elements[4] = {0, 0, 0, 0};
3583 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= ((__u64)1<<((n)&63)))
3584 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & ((__u64)1<<((n)&63))) != 0)
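/* sec_elements is a 256-bit "seen" bitmap: bit n records that the BVD with
 * sec_elmnt_seq == n has been encountered, so the final loop can verify
 * that all sec_elmnt_count BVDs are present.
 */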
3585
3586 if (vc->other_bvds == NULL) {
3587 pr_err("No BVDs for secondary RAID found\n");
3588 return -1;
3589 }
3590 if (conf->prl != DDF_RAID1) {
3591 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3592 return -1;
3593 }
3594 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3595 pr_err("Secondary RAID level %d is unsupported\n",
3596 conf->srl);
3597 return -1;
3598 }
3599 __set_sec_seen(conf->sec_elmnt_seq);
3600 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3601 const struct vd_config *bvd = vc->other_bvds[i];
3602 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3603 continue;
3604 if (bvd->srl != conf->srl) {
3605 pr_err("Inconsistent secondary RAID level across BVDs\n");
3606 return -1;
3607 }
3608 if (bvd->prl != conf->prl) {
3609 pr_err("Different RAID levels for BVDs are unsupported\n");
3610 return -1;
3611 }
3612 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3613 pr_err("All BVDs must have the same number of primary elements\n");
3614 return -1;
3615 }
3616 if (bvd->chunk_shift != conf->chunk_shift) {
3617 pr_err("Different strip sizes for BVDs are unsupported\n");
3618 return -1;
3619 }
3620 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3621 pr_err("Different BVD sizes are unsupported\n");
3622 return -1;
3623 }
3624 __set_sec_seen(bvd->sec_elmnt_seq);
3625 }
3626 for (i = 0; i < conf->sec_elmnt_count; i++) {
3627 if (!__was_sec_seen(i)) {
3628 pr_err("BVD %d is missing\n", i);
3629 return -1;
3630 }
3631 }
3632 return 0;
3633 }
3634
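/* Look up the physical disk with reference number 'refnum' in this virtual
 * disk's configuration, scanning at most 'nmax' slots of the primary BVD
 * and then of each other BVD.  On success *bvd and *idx identify the
 * vd_config and slot that matched, and the return value is the overall
 * raid-disk index (sec_elmnt_seq * prim_elmnt_count plus the number of
 * populated slots before the match).  Returns DDF_NOTFOUND and sets *bvd
 * to NULL if the refnum is not used by this virtual disk.
 */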
3635 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3636 be32 refnum, unsigned int nmax,
3637 const struct vd_config **bvd,
3638 unsigned int *idx)
3639 {
3640 unsigned int i, j, n, sec, cnt;
3641
3642 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3643 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3644
3645 for (i = 0, j = 0 ; i < nmax ; i++) {
3646 /* j counts valid entries for this BVD */
3647 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3648 j++;
3649 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3650 *bvd = &vc->conf;
3651 *idx = i;
3652 return sec * cnt + j - 1;
3653 }
3654 }
3655 if (vc->other_bvds == NULL)
3656 goto bad;
3657
3658 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3659 struct vd_config *vd = vc->other_bvds[n-1];
3660 sec = vd->sec_elmnt_seq;
3661 if (sec == DDF_UNUSED_BVD)
3662 continue;
3663 for (i = 0, j = 0 ; i < nmax ; i++) {
3664 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3665 j++;
3666 if (be32_eq(vd->phys_refnum[i], refnum)) {
3667 *bvd = vd;
3668 *idx = i;
3669 return sec * cnt + j - 1;
3670 }
3671 }
3672 }
3673 bad:
3674 *bvd = NULL;
3675 return DDF_NOTFOUND;
3676 }
3677
3678 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3679 {
3680 /* Given a container loaded by load_super_ddf_all,
3681 * extract information about all the arrays into
3682 * an mdinfo tree.
3683 *
3684 * For each vcl in conflist: create an mdinfo, fill it in,
3685 * then look for matching devices (phys_refnum) in dlist
3686 * and create appropriate device mdinfo.
3687 */
3688 struct ddf_super *ddf = st->sb;
3689 struct mdinfo *rest = NULL;
3690 struct vcl *vc;
3691
3692 for (vc = ddf->conflist ; vc ; vc=vc->next)
3693 {
3694 unsigned int i;
3695 unsigned int j;
3696 struct mdinfo *this;
3697 char *ep;
3698 __u32 *cptr;
3699 unsigned int pd;
3700
3701 if (subarray &&
3702 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3703 *ep != '\0'))
3704 continue;
3705
3706 if (vc->conf.sec_elmnt_count > 1) {
3707 if (check_secondary(vc) != 0)
3708 continue;
3709 }
3710
3711 this = xcalloc(1, sizeof(*this));
3712 this->next = rest;
3713 rest = this;
3714
3715 if (layout_ddf2md(&vc->conf, &this->array))
3716 continue;
3717 this->array.md_minor = -1;
3718 this->array.major_version = -1;
3719 this->array.minor_version = -2;
3720 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3721 cptr = (__u32 *)(vc->conf.guid + 16);
3722 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3723 this->array.utime = DECADE +
3724 be32_to_cpu(vc->conf.timestamp);
3725 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3726
3727 i = vc->vcnum;
3728 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3729 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3730 DDF_init_full) {
3731 this->array.state = 0;
3732 this->resync_start = 0;
3733 } else {
3734 this->array.state = 1;
3735 this->resync_start = MaxSector;
3736 }
3737 memcpy(this->name, ddf->virt->entries[i].name, 16);
3738 this->name[16]=0;
3739 for(j=0; j<16; j++)
3740 if (this->name[j] == ' ')
3741 this->name[j] = 0;
3742
3743 memset(this->uuid, 0, sizeof(this->uuid));
3744 this->component_size = be64_to_cpu(vc->conf.blocks);
3745 this->array.size = this->component_size / 2;
3746 this->container_member = i;
3747
3748 ddf->currentconf = vc;
3749 uuid_from_super_ddf(st, this->uuid);
3750 if (!subarray)
3751 ddf->currentconf = NULL;
3752
3753 sprintf(this->text_version, "/%s/%d",
3754 st->container_devnm, this->container_member);
3755
3756 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3757 struct mdinfo *dev;
3758 struct dl *d;
3759 const struct vd_config *bvd;
3760 unsigned int iphys;
3761 int stt;
3762
3763 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3764 == 0xFFFFFFFF)
3765 continue;
3766
3767 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3768 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3769 != DDF_Online)
3770 continue;
3771
3772 i = get_pd_index_from_refnum(
3773 vc, ddf->phys->entries[pd].refnum,
3774 ddf->mppe, &bvd, &iphys);
3775 if (i == DDF_NOTFOUND)
3776 continue;
3777
3778 this->array.working_disks++;
3779
3780 for (d = ddf->dlist; d ; d=d->next)
3781 if (be32_eq(d->disk.refnum,
3782 ddf->phys->entries[pd].refnum))
3783 break;
3784 if (d == NULL)
3785 /* Haven't found that one yet, maybe there are others */
3786 continue;
3787
3788 dev = xcalloc(1, sizeof(*dev));
3789 dev->next = this->devs;
3790 this->devs = dev;
3791
3792 dev->disk.number = be32_to_cpu(d->disk.refnum);
3793 dev->disk.major = d->major;
3794 dev->disk.minor = d->minor;
3795 dev->disk.raid_disk = i;
3796 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3797 dev->recovery_start = MaxSector;
3798
3799 dev->events = be32_to_cpu(ddf->primary.seq);
3800 dev->data_offset =
3801 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3802 dev->component_size = be64_to_cpu(bvd->blocks);
3803 if (d->devname)
3804 strcpy(dev->name, d->devname);
3805 }
3806 }
3807 return rest;
3808 }
3809
3810 static int store_super_ddf(struct supertype *st, int fd)
3811 {
3812 struct ddf_super *ddf = st->sb;
3813 unsigned long long dsize;
3814 void *buf;
3815 int rc;
3816
3817 if (!ddf)
3818 return 1;
3819
3820 if (!get_dev_size(fd, NULL, &dsize))
3821 return 1;
3822
3823 if (ddf->dlist || ddf->conflist) {
3824 struct stat sta;
3825 struct dl *dl;
3826 int ofd, ret;
3827
3828 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3829 pr_err("%s: file descriptor for invalid device\n",
3830 __func__);
3831 return 1;
3832 }
3833 for (dl = ddf->dlist; dl; dl = dl->next)
3834 if (dl->major == (int)major(sta.st_rdev) &&
3835 dl->minor == (int)minor(sta.st_rdev))
3836 break;
3837 if (!dl) {
3838 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3839 (int)major(sta.st_rdev),
3840 (int)minor(sta.st_rdev));
3841 return 1;
3842 }
3843 ofd = dl->fd;
3844 dl->fd = fd;
3845 ret = (_write_super_to_disk(ddf, dl) != 1);
3846 dl->fd = ofd;
3847 return ret;
3848 }
3849
3850 if (posix_memalign(&buf, 512, 512) != 0)
3851 return 1;
3852 memset(buf, 0, 512);
3853
3854 lseek64(fd, dsize-512, 0);
3855 rc = write(fd, buf, 512);
3856 free(buf);
3857 if (rc < 0)
3858 return 1;
3859 return 0;
3860 }
3861
3862 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3863 {
3864 /*
3865 * return:
3866 * 0 same, or first was empty, and second was copied
3867 * 1 second had wrong number
3868 * 2 wrong uuid
3869 * 3 wrong other info
3870 */
3871 struct ddf_super *first = st->sb;
3872 struct ddf_super *second = tst->sb;
3873 struct dl *dl1, *dl2;
3874 struct vcl *vl1, *vl2;
3875 unsigned int max_vds, max_pds, pd, vd;
3876
3877 if (!first) {
3878 st->sb = tst->sb;
3879 tst->sb = NULL;
3880 return 0;
3881 }
3882
3883 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3884 return 2;
3885
3886 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3887 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3888 be32_to_cpu(first->anchor.seq),
3889 be32_to_cpu(second->anchor.seq));
3890 return 3;
3891 }
3892 if (first->max_part != second->max_part ||
3893 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3894 !be16_eq(first->virt->populated_vdes,
3895 second->virt->populated_vdes)) {
3896 dprintf("%s: PD/VD number mismatch\n", __func__);
3897 return 3;
3898 }
3899
3900 max_pds = be16_to_cpu(first->phys->used_pdes);
3901 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3902 for (pd = 0; pd < max_pds; pd++)
3903 if (be32_eq(first->phys->entries[pd].refnum,
3904 dl2->disk.refnum))
3905 break;
3906 if (pd == max_pds) {
3907 dprintf("%s: no match for disk %08x\n", __func__,
3908 be32_to_cpu(dl2->disk.refnum));
3909 return 3;
3910 }
3911 }
3912
3913 max_vds = be16_to_cpu(first->active->max_vd_entries);
3914 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3915 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3916 continue;
3917 for (vd = 0; vd < max_vds; vd++)
3918 if (!memcmp(first->virt->entries[vd].guid,
3919 vl2->conf.guid, DDF_GUID_LEN))
3920 break;
3921 if (vd == max_vds) {
3922 dprintf("%s: no match for VD config\n", __func__);
3923 return 3;
3924 }
3925 }
3926 /* FIXME should I look at anything else? */
3927
3928 /*
3929 At this point we are fairly sure that the meta data matches.
3930 But the new disk may contain additional local data.
3931 Add it to the super block.
3932 */
3933 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3934 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3935 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3936 DDF_GUID_LEN))
3937 break;
3938 if (vl1) {
3939 if (vl1->other_bvds != NULL &&
3940 vl1->conf.sec_elmnt_seq !=
3941 vl2->conf.sec_elmnt_seq) {
3942 dprintf("%s: adding BVD %u\n", __func__,
3943 vl2->conf.sec_elmnt_seq);
3944 add_other_bvd(vl1, &vl2->conf,
3945 first->conf_rec_len*512);
3946 }
3947 continue;
3948 }
3949
3950 if (posix_memalign((void **)&vl1, 512,
3951 (first->conf_rec_len*512 +
3952 offsetof(struct vcl, conf))) != 0) {
3953 pr_err("%s could not allocate vcl buf\n",
3954 __func__);
3955 return 3;
3956 }
3957
3958 vl1->next = first->conflist;
3959 vl1->block_sizes = NULL;
3960 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3961 if (alloc_other_bvds(first, vl1) != 0) {
3962 pr_err("%s could not allocate other bvds\n",
3963 __func__);
3964 free(vl1);
3965 return 3;
3966 }
3967 for (vd = 0; vd < max_vds; vd++)
3968 if (!memcmp(first->virt->entries[vd].guid,
3969 vl1->conf.guid, DDF_GUID_LEN))
3970 break;
3971 vl1->vcnum = vd;
3972 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3973 first->conflist = vl1;
3974 }
3975
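/* Finally, add any disks which 'second' knows about but 'first'
 * does not yet have in its dlist.
 */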
3976 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3977 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3978 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3979 break;
3980 if (dl1)
3981 continue;
3982
3983 if (posix_memalign((void **)&dl1, 512,
3984 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3985 != 0) {
3986 pr_err("%s could not allocate disk info buffer\n",
3987 __func__);
3988 return 3;
3989 }
3990 memcpy(dl1, dl2, sizeof(*dl1));
3991 dl1->mdupdate = NULL;
3992 dl1->next = first->dlist;
3993 dl1->fd = -1;
3994 for (pd = 0; pd < max_pds; pd++)
3995 if (be32_eq(first->phys->entries[pd].refnum,
3996 dl1->disk.refnum))
3997 break;
3998 dl1->pdnum = pd;
3999 if (dl2->spare) {
4000 if (posix_memalign((void **)&dl1->spare, 512,
4001 first->conf_rec_len*512) != 0) {
4002 pr_err("%s could not allocate spare info buf\n",
4003 __func__);
4004 return 3;
4005 }
4006 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4007 }
4008 for (vd = 0 ; vd < first->max_part ; vd++) {
4009 if (!dl2->vlist[vd]) {
4010 dl1->vlist[vd] = NULL;
4011 continue;
4012 }
4013 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4014 if (!memcmp(vl1->conf.guid,
4015 dl2->vlist[vd]->conf.guid,
4016 DDF_GUID_LEN))
4017 break;
4018 /* vl1 is the matching conf in 'first', or NULL if none was found */
4019 dl1->vlist[vd] = vl1;
4020 }
4021 first->dlist = dl1;
4022 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4023 be32_to_cpu(dl1->disk.refnum));
4024 }
4025
4026 return 0;
4027 }
4028
4029 #ifndef MDASSEMBLE
4030 /*
4031 * A new array 'a' has been started which claims to be instance 'inst'
4032 * within container 'c'.
4033 * We need to confirm that the array matches the metadata in 'c' so
4034 * that we don't corrupt any metadata.
4035 */
4036 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4037 {
4038 struct ddf_super *ddf = c->sb;
4039 int n = atoi(inst);
4040 struct mdinfo *dev;
4041 struct dl *dl;
4042 static const char faulty[] = "faulty";
4043
4044 if (all_ff(ddf->virt->entries[n].guid)) {
4045 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4046 return -ENODEV;
4047 }
4048 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4049 guid_str(ddf->virt->entries[n].guid));
4050 for (dev = a->info.devs; dev; dev = dev->next) {
4051 for (dl = ddf->dlist; dl; dl = dl->next)
4052 if (dl->major == dev->disk.major &&
4053 dl->minor == dev->disk.minor)
4054 break;
4055 if (!dl) {
4056 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4057 __func__, dev->disk.major, dev->disk.minor, n);
4058 return -1;
4059 }
4060 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4061 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4062 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4063 __func__, n, dl->major, dl->minor,
4064 be16_to_cpu(
4065 ddf->phys->entries[dl->pdnum].state));
4066 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4067 sizeof(faulty) - 1)
4068 pr_err("Write to state_fd failed\n");
4069 dev->curr_state = DS_FAULTY;
4070 }
4071 }
4072 a->info.container_member = n;
4073 return 0;
4074 }
4075
4076 /*
4077 * The array 'a' is to be marked clean in the metadata.
4078 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4079 clean up to that point (in sectors). If that cannot be recorded in the
4080 * metadata, then leave it as dirty.
4081 *
4082 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4083 * !global! virtual_disk.virtual_entry structure.
4084 */
4085 static int ddf_set_array_state(struct active_array *a, int consistent)
4086 {
4087 struct ddf_super *ddf = a->container->sb;
4088 int inst = a->info.container_member;
4089 int old = ddf->virt->entries[inst].state;
4090 if (consistent == 2) {
4091 /* Should check if a recovery should be started FIXME */
4092 consistent = 1;
4093 if (!is_resync_complete(&a->info))
4094 consistent = 0;
4095 }
4096 if (consistent)
4097 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4098 else
4099 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4100 if (old != ddf->virt->entries[inst].state)
4101 ddf_set_updates_pending(ddf);
4102
4103 old = ddf->virt->entries[inst].init_state;
4104 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4105 if (is_resync_complete(&a->info))
4106 ddf->virt->entries[inst].init_state |= DDF_init_full;
4107 else if (a->info.resync_start == 0)
4108 ddf->virt->entries[inst].init_state |= DDF_init_not;
4109 else
4110 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4111 if (old != ddf->virt->entries[inst].init_state)
4112 ddf_set_updates_pending(ddf);
4113
4114 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4115 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4116 consistent?"clean":"dirty",
4117 a->info.resync_start);
4118 return consistent;
4119 }
4120
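/*
 * Compute the DDF state of a single BVD: count the member disks that
 * are Online (and neither Failed nor Rebuilding) and map the result
 * to optimal/part_optimal/degraded/failed according to the primary
 * RAID level.
 */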
4121 static int get_bvd_state(const struct ddf_super *ddf,
4122 const struct vd_config *vc)
4123 {
4124 unsigned int i, n_bvd, working = 0;
4125 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4126 int pd, st, state;
4127 for (i = 0; i < n_prim; i++) {
4128 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4129 continue;
4130 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4131 if (pd < 0)
4132 continue;
4133 st = be16_to_cpu(ddf->phys->entries[pd].state);
4134 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4135 == DDF_Online)
4136 working++;
4137 }
4138
4139 state = DDF_state_degraded;
4140 if (working == n_prim)
4141 state = DDF_state_optimal;
4142 else
4143 switch (vc->prl) {
4144 case DDF_RAID0:
4145 case DDF_CONCAT:
4146 case DDF_JBOD:
4147 state = DDF_state_failed;
4148 break;
4149 case DDF_RAID1:
4150 if (working == 0)
4151 state = DDF_state_failed;
4152 else if (working >= 2)
4153 state = DDF_state_part_optimal;
4154 break;
4155 case DDF_RAID4:
4156 case DDF_RAID5:
4157 if (working < n_prim - 1)
4158 state = DDF_state_failed;
4159 break;
4160 case DDF_RAID6:
4161 if (working < n_prim - 2)
4162 state = DDF_state_failed;
4163 else if (working == n_prim - 1)
4164 state = DDF_state_part_optimal;
4165 break;
4166 }
4167 return state;
4168 }
4169
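/*
 * Combine the state of one BVD with that of another under the given
 * secondary RAID level: for a mirrored SVD one optimal copy is enough
 * to remain (partly) optimal, while for striped or concatenated SVDs
 * a single failed BVD fails the whole SVD.
 */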
4170 static int secondary_state(int state, int other, int seclevel)
4171 {
4172 if (state == DDF_state_optimal && other == DDF_state_optimal)
4173 return DDF_state_optimal;
4174 if (seclevel == DDF_2MIRRORED) {
4175 if (state == DDF_state_optimal || other == DDF_state_optimal)
4176 return DDF_state_part_optimal;
4177 if (state == DDF_state_failed && other == DDF_state_failed)
4178 return DDF_state_failed;
4179 return DDF_state_degraded;
4180 } else {
4181 if (state == DDF_state_failed || other == DDF_state_failed)
4182 return DDF_state_failed;
4183 if (state == DDF_state_degraded || other == DDF_state_degraded)
4184 return DDF_state_degraded;
4185 return DDF_state_part_optimal;
4186 }
4187 }
4188
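/* Fold the states of all BVDs of this virtual disk into one SVD state. */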
4189 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4190 {
4191 int state = get_bvd_state(ddf, &vcl->conf);
4192 unsigned int i;
4193 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4194 state = secondary_state(
4195 state,
4196 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4197 vcl->conf.srl);
4198 }
4199 return state;
4200 }
4201
4202 /*
4203 * The state of each disk is stored in the global phys_disk structure
4204 * in phys_disk.entries[n].state.
4205 * This makes various combinations awkward.
4206 * - When a device fails in any array, it must be failed in all arrays
4207 * that include a part of this device.
4208 * - When a component is rebuilding, we cannot include it officially in the
4209 * array unless this is the only array that uses the device.
4210 *
4211 * So: when transitioning:
4212 * Online -> failed: just set the failed flag; the monitor will propagate it.
4213 * spare -> online: the device might need to be added to the array.
4214 * spare -> failed: just set failed. Don't worry whether it is in an array or not.
4215 */
4216 static void ddf_set_disk(struct active_array *a, int n, int state)
4217 {
4218 struct ddf_super *ddf = a->container->sb;
4219 unsigned int inst = a->info.container_member, n_bvd;
4220 struct vcl *vcl;
4221 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4222 &n_bvd, &vcl);
4223 int pd;
4224 struct mdinfo *mdi;
4225 struct dl *dl;
4226
4227 dprintf("%s: %d to %x\n", __func__, n, state);
4228 if (vc == NULL) {
4229 dprintf("ddf: cannot find instance %d!!\n", inst);
4230 return;
4231 }
4232 /* Find the matching slot in 'info'. */
4233 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4234 if (mdi->disk.raid_disk == n)
4235 break;
4236 if (!mdi) {
4237 pr_err("%s: cannot find raid disk %d\n",
4238 __func__, n);
4239 return;
4240 }
4241
4242 /* and find the 'dl' entry corresponding to that. */
4243 for (dl = ddf->dlist; dl; dl = dl->next)
4244 if (mdi->state_fd >= 0 &&
4245 mdi->disk.major == dl->major &&
4246 mdi->disk.minor == dl->minor)
4247 break;
4248 if (!dl) {
4249 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4250 __func__, n,
4251 mdi->disk.major, mdi->disk.minor);
4252 return;
4253 }
4254
4255 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4256 if (pd < 0 || pd != dl->pdnum) {
4257 /* disk doesn't currently exist or has changed.
4258 * If it is now in_sync, insert it. */
4259 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4260 __func__, dl->pdnum, dl->major, dl->minor,
4261 be32_to_cpu(dl->disk.refnum));
4262 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4263 __func__, inst, n_bvd,
4264 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4265 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4266 pd = dl->pdnum; /* FIXME: is this really correct ? */
4267 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4268 LBA_OFFSET(ddf, vc)[n_bvd] =
4269 cpu_to_be64(mdi->data_offset);
4270 be16_clear(ddf->phys->entries[pd].type,
4271 cpu_to_be16(DDF_Global_Spare));
4272 be16_set(ddf->phys->entries[pd].type,
4273 cpu_to_be16(DDF_Active_in_VD));
4274 ddf_set_updates_pending(ddf);
4275 }
4276 } else {
4277 be16 old = ddf->phys->entries[pd].state;
4278 if (state & DS_FAULTY)
4279 be16_set(ddf->phys->entries[pd].state,
4280 cpu_to_be16(DDF_Failed));
4281 if (state & DS_INSYNC) {
4282 be16_set(ddf->phys->entries[pd].state,
4283 cpu_to_be16(DDF_Online));
4284 be16_clear(ddf->phys->entries[pd].state,
4285 cpu_to_be16(DDF_Rebuilding));
4286 }
4287 if (!be16_eq(old, ddf->phys->entries[pd].state))
4288 ddf_set_updates_pending(ddf);
4289 }
4290
4291 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4292 be32_to_cpu(dl->disk.refnum), state,
4293 be16_to_cpu(ddf->phys->entries[pd].state));
4294
4295 /* Now we need to check the state of the array and update
4296 * virtual_disk.entries[n].state.
4297 * It needs to be one of "optimal", "degraded", "failed".
4298 * I don't understand 'deleted' or 'missing'.
4299 */
4300 state = get_svd_state(ddf, vcl);
4301
4302 if (ddf->virt->entries[inst].state !=
4303 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4304 | state)) {
4305
4306 ddf->virt->entries[inst].state =
4307 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4308 | state;
4309 ddf_set_updates_pending(ddf);
4310 }
4311
4312 }
4313
4314 static void ddf_sync_metadata(struct supertype *st)
4315 {
4316
4317 /*
4318 * Write all data to all devices.
4319 * Later, we might be able to track whether only local changes
4320 * have been made, or whether any global data has been changed,
4321 * but ddf is sufficiently weird that it probably always
4322 * changes global data ....
4323 */
4324 struct ddf_super *ddf = st->sb;
4325 if (!ddf->updates_pending)
4326 return;
4327 ddf->updates_pending = 0;
4328 __write_init_super_ddf(st);
4329 dprintf("ddf: sync_metadata\n");
4330 }
4331
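/*
 * Unlink every conf record matching 'guid' from the list.
 * Returns 1 if anything was removed, 0 otherwise; the records
 * themselves are not freed here.
 */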
4332 static int del_from_conflist(struct vcl **list, const char *guid)
4333 {
4334 struct vcl **p;
4335 int found = 0;
4336 for (p = list; p && *p; p = &((*p)->next))
4337 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4338 found = 1;
4339 *p = (*p)->next;
4340 }
4341 return found;
4342 }
4343
4344 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4345 {
4346 struct dl *dl;
4347 unsigned int vdnum, i;
4348 vdnum = find_vde_by_guid(ddf, guid);
4349 if (vdnum == DDF_NOTFOUND) {
4350 pr_err("%s: could not find VD %s\n", __func__,
4351 guid_str(guid));
4352 return -1;
4353 }
4354 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4355 pr_err("%s: could not find conf %s\n", __func__,
4356 guid_str(guid));
4357 return -1;
4358 }
4359 for (dl = ddf->dlist; dl; dl = dl->next)
4360 for (i = 0; i < ddf->max_part; i++)
4361 if (dl->vlist[i] != NULL &&
4362 !memcmp(dl->vlist[i]->conf.guid, guid,
4363 DDF_GUID_LEN))
4364 dl->vlist[i] = NULL;
4365 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4366 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4367 return 0;
4368 }
4369
4370 static int kill_subarray_ddf(struct supertype *st)
4371 {
4372 struct ddf_super *ddf = st->sb;
4373 /*
4374 * currentconf is set in container_content_ddf,
4375 * called with subarray arg
4376 */
4377 struct vcl *victim = ddf->currentconf;
4378 struct vd_config *conf;
4379 unsigned int vdnum;
4380 ddf->currentconf = NULL;
4381 if (!victim) {
4382 pr_err("%s: nothing to kill\n", __func__);
4383 return -1;
4384 }
4385 conf = &victim->conf;
4386 vdnum = find_vde_by_guid(ddf, conf->guid);
4387 if (vdnum == DDF_NOTFOUND) {
4388 pr_err("%s: could not find VD %s\n", __func__,
4389 guid_str(conf->guid));
4390 return -1;
4391 }
4392 if (st->update_tail) {
4393 struct virtual_disk *vd;
4394 int len = sizeof(struct virtual_disk)
4395 + sizeof(struct virtual_entry);
4396 vd = xmalloc(len);
4397 if (vd == NULL) {
4398 pr_err("%s: failed to allocate %d bytes\n", __func__,
4399 len);
4400 return -1;
4401 }
4402 memset(vd, 0, len);
4403 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4404 vd->populated_vdes = cpu_to_be16(0);
4405 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4406 /* we use DDF_state_deleted as marker */
4407 vd->entries[0].state = DDF_state_deleted;
4408 append_metadata_update(st, vd, len);
4409 } else {
4410 _kill_subarray_ddf(ddf, conf->guid);
4411 ddf_set_updates_pending(ddf);
4412 ddf_sync_metadata(st);
4413 }
4414 return 0;
4415 }
4416
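/*
 * Find the BVD in 'update' with the same sec_elmnt_seq as 'conf' and
 * copy its phys_refnum array together with the LBA offsets stored
 * directly behind it - hence mppe * (sizeof(__u32) + sizeof(__u64))
 * bytes.
 */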
4417 static void copy_matching_bvd(struct ddf_super *ddf,
4418 struct vd_config *conf,
4419 const struct metadata_update *update)
4420 {
4421 unsigned int mppe =
4422 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4423 unsigned int len = ddf->conf_rec_len * 512;
4424 char *p;
4425 struct vd_config *vc;
4426 for (p = update->buf; p < update->buf + update->len; p += len) {
4427 vc = (struct vd_config *) p;
4428 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4429 memcpy(conf->phys_refnum, vc->phys_refnum,
4430 mppe * (sizeof(__u32) + sizeof(__u64)));
4431 return;
4432 }
4433 }
4434 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4435 conf->sec_elmnt_seq, guid_str(conf->guid));
4436 }
4437
4438 static void ddf_process_update(struct supertype *st,
4439 struct metadata_update *update)
4440 {
4441 /* Apply this update to the metadata.
4442 * The first 4 bytes are a DDF_*_MAGIC which guides
4443 * our actions.
4444 * Possible updates are:
4445 * DDF_PHYS_RECORDS_MAGIC
4446 * Add a new physical device or remove an old one.
4447 * Changes to this record only happen implicitly.
4448 * used_pdes is the device number.
4449 * DDF_VIRT_RECORDS_MAGIC
4450 * Add a new VD. Possibly also change the 'access' bits.
4451 * populated_vdes is the entry number.
4452 * DDF_VD_CONF_MAGIC
4453 * New or updated VD. The VIRT_RECORD must already
4454 * exist. For an update, phys_refnum and lba_offset
4455 * (at least) are updated, and the VD_CONF must
4456 * be written to precisely those devices listed with
4457 * a phys_refnum.
4458 * DDF_SPARE_ASSIGN_MAGIC
4459 * replacement Spare Assignment Record... but for which device?
4460 *
4461 * So, e.g.:
4462 * - to create a new array, we send a VIRT_RECORD and
4463 * a VD_CONF. Then assemble and start the array.
4464 * - to activate a spare we send a VD_CONF to add the phys_refnum
4465 * and offset. This will also mark the spare as active with
4466 * a spare-assignment record.
4467 */
4468 struct ddf_super *ddf = st->sb;
4469 be32 *magic = (be32 *)update->buf;
4470 struct phys_disk *pd;
4471 struct virtual_disk *vd;
4472 struct vd_config *vc;
4473 struct vcl *vcl;
4474 struct dl *dl;
4475 unsigned int ent;
4476 unsigned int pdnum, pd2, len;
4477
4478 dprintf("Process update %x\n", be32_to_cpu(*magic));
4479
4480 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4481
4482 if (update->len != (sizeof(struct phys_disk) +
4483 sizeof(struct phys_disk_entry)))
4484 return;
4485 pd = (struct phys_disk*)update->buf;
4486
4487 ent = be16_to_cpu(pd->used_pdes);
4488 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4489 return;
4490 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4491 struct dl **dlp;
4492 /* removing this disk. */
4493 be16_set(ddf->phys->entries[ent].state,
4494 cpu_to_be16(DDF_Missing));
4495 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4496 struct dl *dl = *dlp;
4497 if (dl->pdnum == (signed)ent) {
4498 close(dl->fd);
4499 dl->fd = -1;
4500 /* FIXME this doesn't free
4501 * dl->devname */
4502 update->space = dl;
4503 *dlp = dl->next;
4504 break;
4505 }
4506 }
4507 ddf_set_updates_pending(ddf);
4508 return;
4509 }
4510 if (!all_ff(ddf->phys->entries[ent].guid))
4511 return;
4512 ddf->phys->entries[ent] = pd->entries[0];
4513 ddf->phys->used_pdes = cpu_to_be16
4514 (1 + be16_to_cpu(ddf->phys->used_pdes));
4515 ddf_set_updates_pending(ddf);
4516 if (ddf->add_list) {
4517 struct active_array *a;
4518 struct dl *al = ddf->add_list;
4519 ddf->add_list = al->next;
4520
4521 al->next = ddf->dlist;
4522 ddf->dlist = al;
4523
4524 /* As a device has been added, we should check
4525 * for any degraded devices that might make
4526 * use of this spare */
4527 for (a = st->arrays ; a; a=a->next)
4528 a->check_degraded = 1;
4529 }
4530 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4531
4532 if (update->len != (sizeof(struct virtual_disk) +
4533 sizeof(struct virtual_entry)))
4534 return;
4535 vd = (struct virtual_disk*)update->buf;
4536
4537 if (vd->entries[0].state == DDF_state_deleted) {
4538 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4539 return;
4540 } else {
4541
4542 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4543 if (ent != DDF_NOTFOUND) {
4544 dprintf("%s: VD %s exists already in slot %d\n",
4545 __func__, guid_str(vd->entries[0].guid),
4546 ent);
4547 return;
4548 }
4549 ent = find_unused_vde(ddf);
4550 if (ent == DDF_NOTFOUND)
4551 return;
4552 ddf->virt->entries[ent] = vd->entries[0];
4553 ddf->virt->populated_vdes =
4554 cpu_to_be16(
4555 1 + be16_to_cpu(
4556 ddf->virt->populated_vdes));
4557 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4558 __func__, guid_str(vd->entries[0].guid), ent,
4559 ddf->virt->entries[ent].state,
4560 ddf->virt->entries[ent].init_state);
4561 }
4562 ddf_set_updates_pending(ddf);
4563 }
4564
4565 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4566 vc = (struct vd_config*)update->buf;
4567 len = ddf->conf_rec_len * 512;
4568 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4569 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4570 __func__, guid_str(vc->guid), update->len,
4571 vc->sec_elmnt_count);
4572 return;
4573 }
4574 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4575 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4576 break;
4577 dprintf("%s: conf update for %s (%s)\n", __func__,
4578 guid_str(vc->guid), (vcl ? "old" : "new"));
4579 if (vcl) {
4580 /* An update, just copy the phys_refnum and lba_offset
4581 * fields
4582 */
4583 unsigned int i;
4584 unsigned int k;
4585 copy_matching_bvd(ddf, &vcl->conf, update);
4586 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4587 dprintf("BVD %u has %08x at %llu\n", 0,
4588 be32_to_cpu(vcl->conf.phys_refnum[k]),
4589 be64_to_cpu(LBA_OFFSET(ddf,
4590 &vcl->conf)[k]));
4591 for (i = 1; i < vc->sec_elmnt_count; i++) {
4592 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4593 update);
4594 for (k = 0; k < be16_to_cpu(
4595 vc->prim_elmnt_count); k++)
4596 dprintf("BVD %u has %08x at %llu\n", i,
4597 be32_to_cpu
4598 (vcl->other_bvds[i-1]->
4599 phys_refnum[k]),
4600 be64_to_cpu
4601 (LBA_OFFSET
4602 (ddf,
4603 vcl->other_bvds[i-1])[k]));
4604 }
4605 } else {
4606 /* A new VD_CONF */
4607 unsigned int i;
4608 if (!update->space)
4609 return;
4610 vcl = update->space;
4611 update->space = NULL;
4612 vcl->next = ddf->conflist;
4613 memcpy(&vcl->conf, vc, len);
4614 ent = find_vde_by_guid(ddf, vc->guid);
4615 if (ent == DDF_NOTFOUND)
4616 return;
4617 vcl->vcnum = ent;
4618 ddf->conflist = vcl;
4619 for (i = 1; i < vc->sec_elmnt_count; i++)
4620 memcpy(vcl->other_bvds[i-1],
4621 update->buf + len * i, len);
4622 }
4623 /* Set DDF_Transition on all Failed devices - to help
4624 * us detect those that are no longer in use
4625 */
4626 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4627 pdnum++)
4628 if (be16_and(ddf->phys->entries[pdnum].state,
4629 cpu_to_be16(DDF_Failed)))
4630 be16_set(ddf->phys->entries[pdnum].state,
4631 cpu_to_be16(DDF_Transition));
4632 /* Now make sure vlist is correct for each dl. */
4633 for (dl = ddf->dlist; dl; dl = dl->next) {
4634 unsigned int vn = 0;
4635 int in_degraded = 0;
4636 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4637 unsigned int dn, ibvd;
4638 const struct vd_config *conf;
4639 int vstate;
4640 dn = get_pd_index_from_refnum(vcl,
4641 dl->disk.refnum,
4642 ddf->mppe,
4643 &conf, &ibvd);
4644 if (dn == DDF_NOTFOUND)
4645 continue;
4646 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4647 dl->pdnum,
4648 be32_to_cpu(dl->disk.refnum),
4649 guid_str(conf->guid),
4650 conf->sec_elmnt_seq, vn);
4651 /* Clear the Transition flag */
4652 if (be16_and
4653 (ddf->phys->entries[dl->pdnum].state,
4654 cpu_to_be16(DDF_Failed)))
4655 be16_clear(ddf->phys
4656 ->entries[dl->pdnum].state,
4657 cpu_to_be16(DDF_Transition));
4658 dl->vlist[vn++] = vcl;
4659 vstate = ddf->virt->entries[vcl->vcnum].state
4660 & DDF_state_mask;
4661 if (vstate == DDF_state_degraded ||
4662 vstate == DDF_state_part_optimal)
4663 in_degraded = 1;
4664 }
4665 while (vn < ddf->max_part)
4666 dl->vlist[vn++] = NULL;
4667 if (dl->vlist[0]) {
4668 be16_clear(ddf->phys->entries[dl->pdnum].type,
4669 cpu_to_be16(DDF_Global_Spare));
4670 if (!be16_and(ddf->phys
4671 ->entries[dl->pdnum].type,
4672 cpu_to_be16(DDF_Active_in_VD))) {
4673 be16_set(ddf->phys
4674 ->entries[dl->pdnum].type,
4675 cpu_to_be16(DDF_Active_in_VD));
4676 if (in_degraded)
4677 be16_set(ddf->phys
4678 ->entries[dl->pdnum]
4679 .state,
4680 cpu_to_be16
4681 (DDF_Rebuilding));
4682 }
4683 }
4684 if (dl->spare) {
4685 be16_clear(ddf->phys->entries[dl->pdnum].type,
4686 cpu_to_be16(DDF_Global_Spare));
4687 be16_set(ddf->phys->entries[dl->pdnum].type,
4688 cpu_to_be16(DDF_Spare));
4689 }
4690 if (!dl->vlist[0] && !dl->spare) {
4691 be16_set(ddf->phys->entries[dl->pdnum].type,
4692 cpu_to_be16(DDF_Global_Spare));
4693 be16_clear(ddf->phys->entries[dl->pdnum].type,
4694 cpu_to_be16(DDF_Spare));
4695 be16_clear(ddf->phys->entries[dl->pdnum].type,
4696 cpu_to_be16(DDF_Active_in_VD));
4697 }
4698 }
4699
4700 /* Now remove any 'Failed' devices that are not part
4701 * of any VD. They will have the Transition flag set.
4702 * Once done, we need to update all dl->pdnum numbers.
4703 */
4704 pd2 = 0;
4705 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4706 pdnum++) {
4707 if (be16_and(ddf->phys->entries[pdnum].state,
4708 cpu_to_be16(DDF_Failed))
4709 && be16_and(ddf->phys->entries[pdnum].state,
4710 cpu_to_be16(DDF_Transition))) {
4711 /* skip this one unless it is in dlist */
4712 for (dl = ddf->dlist; dl; dl = dl->next)
4713 if (dl->pdnum == (int)pdnum)
4714 break;
4715 if (!dl)
4716 continue;
4717 }
4718 if (pdnum == pd2)
4719 pd2++;
4720 else {
4721 ddf->phys->entries[pd2] =
4722 ddf->phys->entries[pdnum];
4723 for (dl = ddf->dlist; dl; dl = dl->next)
4724 if (dl->pdnum == (int)pdnum)
4725 dl->pdnum = pd2;
4726 pd2++;
4727 }
4728 }
4729 ddf->phys->used_pdes = cpu_to_be16(pd2);
4730 while (pd2 < pdnum) {
4731 memset(ddf->phys->entries[pd2].guid, 0xff,
4732 DDF_GUID_LEN);
4733 pd2++;
4734 }
4735
4736 ddf_set_updates_pending(ddf);
4737 }
4738 /* case DDF_SPARE_ASSIGN_MAGIC */
4739 }
4740
4741 static void ddf_prepare_update(struct supertype *st,
4742 struct metadata_update *update)
4743 {
4744 /* This update arrived at managemon.
4745 * We are about to pass it to monitor.
4746 * If a malloc is needed, do it here.
4747 */
4748 struct ddf_super *ddf = st->sb;
4749 be32 *magic = (be32 *)update->buf;
4750 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4751 struct vcl *vcl;
4752 struct vd_config *conf = (struct vd_config *) update->buf;
4753 if (posix_memalign(&update->space, 512,
4754 offsetof(struct vcl, conf)
4755 + ddf->conf_rec_len * 512) != 0) {
4756 update->space = NULL;
4757 return;
4758 }
4759 vcl = update->space;
4760 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4761 if (alloc_other_bvds(ddf, vcl) != 0) {
4762 free(update->space);
4763 update->space = NULL;
4764 }
4765 }
4766 }
4767
4768 /*
4769 * Check degraded state of a RAID10.
4770 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4771 */
4772 static int raid10_degraded(struct mdinfo *info)
4773 {
4774 int n_prim, n_bvds;
4775 int i;
4776 struct mdinfo *d;
4777 char *found;
4778 int ret = -1;
4779
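/* The low byte of the md raid10 layout is the number of near copies,
 * i.e. the number of devices per BVD; e.g. the 0x102 layout returned
 * by ddf_level_to_layout() yields n_prim == 2.
 */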
4780 n_prim = info->array.layout & ~0x100;
4781 n_bvds = info->array.raid_disks / n_prim;
4782 found = xmalloc(n_bvds);
4783 if (found == NULL)
4784 return ret;
4785 memset(found, 0, n_bvds);
4786 for (d = info->devs; d; d = d->next) {
4787 i = d->disk.raid_disk / n_prim;
4788 if (i >= n_bvds) {
4789 pr_err("%s: BUG: invalid raid disk\n", __func__);
4790 goto out;
4791 }
4792 if (d->state_fd > 0)
4793 found[i]++;
4794 }
4795 ret = 2;
4796 for (i = 0; i < n_bvds; i++)
4797 if (!found[i]) {
4798 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4799 ret = 0;
4800 goto out;
4801 } else if (found[i] < n_prim) {
4802 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4803 n_bvds);
4804 ret = 1;
4805 }
4806 out:
4807 free(found);
4808 return ret;
4809 }
4810
4811 /*
4812 * Check if the array 'a' is degraded but not failed.
4813 * If it is, find as many spares as are available and needed and
4814 * arrange for their inclusion.
4815 * We only choose devices which are not already in the array,
4816 * and prefer those with a spare-assignment to this array;
4817 * otherwise we choose global spares - assuming always that
4818 * there is enough room.
4819 * For each spare that we assign, we return an 'mdinfo' which
4820 * describes the position for the device in the array.
4821 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4822 * the new phys_refnum and lba_offset values.
4823 *
4824 * Only worry about BVDs at the moment.
4825 */
4826 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4827 struct metadata_update **updates)
4828 {
4829 int working = 0;
4830 struct mdinfo *d;
4831 struct ddf_super *ddf = a->container->sb;
4832 int global_ok = 0;
4833 struct mdinfo *rv = NULL;
4834 struct mdinfo *di;
4835 struct metadata_update *mu;
4836 struct dl *dl;
4837 int i;
4838 unsigned int j;
4839 struct vcl *vcl;
4840 struct vd_config *vc;
4841 unsigned int n_bvd;
4842
4843 for (d = a->info.devs ; d ; d = d->next) {
4844 if ((d->curr_state & DS_FAULTY) &&
4845 d->state_fd >= 0)
4846 /* wait for Removal to happen */
4847 return NULL;
4848 if (d->state_fd >= 0)
4849 working ++;
4850 }
4851
4852 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4853 a->info.array.raid_disks,
4854 a->info.array.level);
4855 if (working == a->info.array.raid_disks)
4856 return NULL; /* array not degraded */
4857 switch (a->info.array.level) {
4858 case 1:
4859 if (working == 0)
4860 return NULL; /* failed */
4861 break;
4862 case 4:
4863 case 5:
4864 if (working < a->info.array.raid_disks - 1)
4865 return NULL; /* failed */
4866 break;
4867 case 6:
4868 if (working < a->info.array.raid_disks - 2)
4869 return NULL; /* failed */
4870 break;
4871 case 10:
4872 if (raid10_degraded(&a->info) < 1)
4873 return NULL;
4874 break;
4875 default: /* concat or stripe */
4876 return NULL; /* failed */
4877 }
4878
4879 /* For each slot, if it is not working, find a spare */
4880 dl = ddf->dlist;
4881 for (i = 0; i < a->info.array.raid_disks; i++) {
4882 for (d = a->info.devs ; d ; d = d->next)
4883 if (d->disk.raid_disk == i)
4884 break;
4885 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4886 if (d && (d->state_fd >= 0))
4887 continue;
4888
4889 /* OK, this device needs recovery. Find a spare */
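/* The first pass over the disk list accepts only dedicated spares
 * assigned to this VD; if none is found, global_ok is set below and
 * the scan restarts from the head of the list, this time accepting
 * global spares as well.
 */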
4890 again:
4891 for ( ; dl ; dl = dl->next) {
4892 unsigned long long esize;
4893 unsigned long long pos;
4894 struct mdinfo *d2;
4895 int is_global = 0;
4896 int is_dedicated = 0;
4897 struct extent *ex;
4898 unsigned int j;
4899 be16 state = ddf->phys->entries[dl->pdnum].state;
4900 if (be16_and(state,
4901 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4902 !be16_and(state,
4903 cpu_to_be16(DDF_Online)))
4904 continue;
4905
4906 /* If in this array, skip */
4907 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4908 if (d2->state_fd >= 0 &&
4909 d2->disk.major == dl->major &&
4910 d2->disk.minor == dl->minor) {
4911 dprintf("%x:%x (%08x) already in array\n",
4912 dl->major, dl->minor,
4913 be32_to_cpu(dl->disk.refnum));
4914 break;
4915 }
4916 if (d2)
4917 continue;
4918 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4919 cpu_to_be16(DDF_Spare))) {
4920 /* Check spare assign record */
4921 if (dl->spare) {
4922 if (dl->spare->type & DDF_spare_dedicated) {
4923 /* check spare_ents for guid */
4924 for (j = 0 ;
4925 j < be16_to_cpu
4926 (dl->spare
4927 ->populated);
4928 j++) {
4929 if (memcmp(dl->spare->spare_ents[j].guid,
4930 ddf->virt->entries[a->info.container_member].guid,
4931 DDF_GUID_LEN) == 0)
4932 is_dedicated = 1;
4933 }
4934 } else
4935 is_global = 1;
4936 }
4937 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4938 cpu_to_be16(DDF_Global_Spare))) {
4939 is_global = 1;
4940 } else if (!be16_and(ddf->phys
4941 ->entries[dl->pdnum].state,
4942 cpu_to_be16(DDF_Failed))) {
4943 /* we can possibly use some of this */
4944 is_global = 1;
4945 }
4946 if ( ! (is_dedicated ||
4947 (is_global && global_ok))) {
4948 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4949 is_dedicated, is_global);
4950 continue;
4951 }
4952
4953 /* We are allowed to use this device - is there space?
4954 * We need a->info.component_size sectors */
4955 ex = get_extents(ddf, dl);
4956 if (!ex) {
4957 dprintf("cannot get extents\n");
4958 continue;
4959 }
4960 j = 0; pos = 0;
4961 esize = 0;
4962
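/* Walk the used extents (sorted by start, terminated by a zero-size
 * entry) and find the first gap large enough to hold one component
 * of this array; 'pos' is left at the start of that gap.
 */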
4963 do {
4964 esize = ex[j].start - pos;
4965 if (esize >= a->info.component_size)
4966 break;
4967 pos = ex[j].start + ex[j].size;
4968 j++;
4969 } while (ex[j-1].size);
4970
4971 free(ex);
4972 if (esize < a->info.component_size) {
4973 dprintf("%x:%x has no room: %llu %llu\n",
4974 dl->major, dl->minor,
4975 esize, a->info.component_size);
4976 /* No room */
4977 continue;
4978 }
4979
4980 /* Cool, we have a device with some space at pos */
4981 di = xcalloc(1, sizeof(*di));
4982 di->disk.number = i;
4983 di->disk.raid_disk = i;
4984 di->disk.major = dl->major;
4985 di->disk.minor = dl->minor;
4986 di->disk.state = 0;
4987 di->recovery_start = 0;
4988 di->data_offset = pos;
4989 di->component_size = a->info.component_size;
4990 di->container_member = dl->pdnum;
4991 di->next = rv;
4992 rv = di;
4993 dprintf("%x:%x (%08x) to be %d at %llu\n",
4994 dl->major, dl->minor,
4995 be32_to_cpu(dl->disk.refnum), i, pos);
4996
4997 break;
4998 }
4999 if (!dl && ! global_ok) {
5000 /* not enough dedicated spares, try global */
5001 global_ok = 1;
5002 dl = ddf->dlist;
5003 goto again;
5004 }
5005 }
5006
5007 if (!rv)
5008 /* No spares found */
5009 return rv;
5010 /* Now 'rv' has a list of devices to return.
5011 * Create a metadata_update record to update the
5012 * phys_refnum and lba_offset values
5013 */
5014 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5015 &n_bvd, &vcl);
5016 if (vc == NULL)
5017 return NULL;
5018
5019 mu = xmalloc(sizeof(*mu));
5020 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5021 free(mu);
5022 return NULL;
5023 }
5024
5025 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5026 mu->buf = xmalloc(mu->len);
5027 mu->space = NULL;
5028 mu->space_list = NULL;
5029 mu->next = *updates;
5030 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5031 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5032 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5033 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5034
5035 vc = (struct vd_config*)mu->buf;
5036 for (di = rv ; di ; di = di->next) {
5037 unsigned int i_sec, i_prim;
5038 i_sec = di->disk.raid_disk
5039 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5040 i_prim = di->disk.raid_disk
5041 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5042 vc = (struct vd_config *)(mu->buf
5043 + i_sec * ddf->conf_rec_len * 512);
5044 for (dl = ddf->dlist; dl; dl = dl->next)
5045 if (dl->major == di->disk.major
5046 && dl->minor == di->disk.minor)
5047 break;
5048 if (!dl) {
5049 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5050 __func__, di->disk.raid_disk,
5051 di->disk.major, di->disk.minor);
5052 return NULL;
5053 }
5054 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5055 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5056 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5057 be32_to_cpu(vc->phys_refnum[i_prim]),
5058 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5059 }
5060 *updates = mu;
5061 return rv;
5062 }
5063 #endif /* MDASSEMBLE */
5064
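/*
 * Default md layout for a given RAID level: none for RAID0/1,
 * left-symmetric for RAID5, rotating-N-continue (the DDF-style parity
 * rotation) for RAID6, and a near-2 layout (0x102) for RAID10.
 */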
5065 static int ddf_level_to_layout(int level)
5066 {
5067 switch(level) {
5068 case 0:
5069 case 1:
5070 return 0;
5071 case 5:
5072 return ALGORITHM_LEFT_SYMMETRIC;
5073 case 6:
5074 return ALGORITHM_ROTATING_N_CONTINUE;
5075 case 10:
5076 return 0x102;
5077 default:
5078 return UnSet;
5079 }
5080 }
5081
5082 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5083 {
5084 if (level && *level == UnSet)
5085 *level = LEVEL_CONTAINER;
5086
5087 if (level && layout && *layout == UnSet)
5088 *layout = ddf_level_to_layout(*level);
5089 }
5090
5091 struct superswitch super_ddf = {
5092 #ifndef MDASSEMBLE
5093 .examine_super = examine_super_ddf,
5094 .brief_examine_super = brief_examine_super_ddf,
5095 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5096 .export_examine_super = export_examine_super_ddf,
5097 .detail_super = detail_super_ddf,
5098 .brief_detail_super = brief_detail_super_ddf,
5099 .validate_geometry = validate_geometry_ddf,
5100 .write_init_super = write_init_super_ddf,
5101 .add_to_super = add_to_super_ddf,
5102 .remove_from_super = remove_from_super_ddf,
5103 .load_container = load_container_ddf,
5104 .copy_metadata = copy_metadata_ddf,
5105 .kill_subarray = kill_subarray_ddf,
5106 #endif
5107 .match_home = match_home_ddf,
5108 .uuid_from_super= uuid_from_super_ddf,
5109 .getinfo_super = getinfo_super_ddf,
5110 .update_super = update_super_ddf,
5111
5112 .avail_size = avail_size_ddf,
5113
5114 .compare_super = compare_super_ddf,
5115
5116 .load_super = load_super_ddf,
5117 .init_super = init_super_ddf,
5118 .store_super = store_super_ddf,
5119 .free_super = free_super_ddf,
5120 .match_metadata_desc = match_metadata_desc_ddf,
5121 .container_content = container_content_ddf,
5122 .default_geometry = default_geometry_ddf,
5123
5124 .external = 1,
5125
5126 #ifndef MDASSEMBLE
5127 /* for mdmon */
5128 .open_new = ddf_open_new,
5129 .set_array_state= ddf_set_array_state,
5130 .set_disk = ddf_set_disk,
5131 .sync_metadata = ddf_sync_metadata,
5132 .process_update = ddf_process_update,
5133 .prepare_update = ddf_prepare_update,
5134 .activate_spare = ddf_activate_spare,
5135 #endif
5136 .name = "ddf",
5137 };