1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
 24  * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
 37 /* DDF timestamps are 1980 based, so to convert them to Linux (1970 based)
 38  * timestamps we need to add the number of seconds in the decade of the
 39  * seventies: 10 years including 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
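
/* A minimal illustrative sketch (not part of the driver) of what DECADE is
 * used for further down (e.g. in getinfo_super_ddf()): shifting between
 * DDF's 1980-based timestamps and Unix 1970-based time_t. The example_*
 * helpers are hypothetical.
 */
#if 0
static time_t example_ddf_time_to_unix(__u32 ddf_secs)
{
	/* DDF counts seconds from 1980-01-01, Unix from 1970-01-01 */
	return (time_t)ddf_secs + DECADE;
}

static __u32 example_unix_time_to_ddf(time_t t)
{
	return (__u32)(t - DECADE);
}
#endif
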
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
 57  * the rest of the metadata, which usually lives just before the anchor (at lower LBAs).
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
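
/* Illustrative sketch only: locating the anchor as described above. This
 * mirrors what load_ddf_headers() below actually does; the example_* helper
 * and its error handling are hypothetical, and it relies on types and
 * helpers defined later in this file.
 */
#if 0
static int example_read_anchor(int fd, struct ddf_header *anchor)
{
	unsigned long long dsize;

	/* the anchor always occupies the last 512-byte block of the device */
	if (!get_dev_size(fd, NULL, &dsize))
		return -1;
	if (lseek64(fd, dsize - 512, 0) < 0)
		return -1;
	if (read(fd, anchor, 512) != 512)
		return -1;
	return be32_eq(anchor->magic, DDF_HEADER_MAGIC) ? 0 : -1;
}
#endif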
64
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
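
/* Usage sketch for the byte-order wrapper types above (illustrative, not
 * part of the driver). The point of wrapping __u16/__u32/__u64 in structs
 * is that on-disk (big-endian) values cannot be mixed with CPU-order
 * integers by accident; they must go through the helpers. The example_*
 * function is hypothetical.
 */
#if 0
static void example_beword_usage(void)
{
	be32 on_disk = cpu_to_be32(42);		/* CPU order -> on-disk */
	__u32 in_cpu = be32_to_cpu(on_disk);	/* on-disk -> CPU order */
	be16 a = cpu_to_be16(1), b = cpu_to_be16(1);

	/* wrapped values are compared with the *_eq helpers, never with == */
	if (be16_eq(a, b) && in_cpu == 42)
		;	/* both conditions hold in this example */
}
#endif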
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
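
/* Worked example (illustrative) of the config_record_len formula quoted in
 * the comment above: each configuration record is a 512-byte fixed header
 * (struct vd_config below) followed by one 4-byte refnum and one 8-byte LBA
 * per primary element, i.e. 12 bytes per entry, rounded up to whole sectors.
 * The example_* helper is hypothetical.
 */
#if 0
static unsigned int example_config_record_len(unsigned int max_primary_element_entries)
{
	return 1 + (max_primary_element_entries * 12 + 511) / 512;
	/* e.g.   16 entries -> 1 +  1 =  2 sectors
	 *       256 entries -> 1 +  6 =  7 sectors
	 *      4096 entries -> 1 + 96 = 97 sectors
	 */
}
#endif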
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
 310 #define DDF_init_quick 0x01 /* initialisation in progress.
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records each config_record_len sectors
321 * They can be vd_config or spare_assign
322 */
323
324 struct vd_config {
325 be32 magic; /* DDF_VD_CONF_MAGIC */
326 be32 crc;
327 char guid[DDF_GUID_LEN];
328 be32 timestamp;
329 be32 seqnum;
330 __u8 pad0[24];
331 be16 prim_elmnt_count;
332 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
333 __u8 prl;
334 __u8 rlq;
335 __u8 sec_elmnt_count;
336 __u8 sec_elmnt_seq;
337 __u8 srl;
338 be64 blocks; /* blocks per component could be different
339 * on different component devices...(only
340 * for concat I hope) */
341 be64 array_blocks; /* blocks in array */
342 __u8 pad1[8];
343 be32 spare_refs[8];
344 __u8 cache_pol[8];
345 __u8 bg_rate;
346 __u8 pad2[3];
347 __u8 pad3[52];
348 __u8 pad4[192];
349 __u8 v0[32]; /* reserved- 0xff */
350 __u8 v1[32]; /* reserved- 0xff */
351 __u8 v2[16]; /* reserved- 0xff */
352 __u8 v3[16]; /* reserved- 0xff */
353 __u8 vendor[32];
354 be32 phys_refnum[0]; /* refnum of each disk in sequence */
355 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
356 bvd are always the same size */
357 };
358 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
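
/* Sketch of how the two trailing arrays of struct vd_config are addressed
 * (illustrative only). A configuration record ends with mppe refnums
 * followed by mppe 64-bit LBA offsets; LBA_OFFSET() simply skips past the
 * refnum array. The example_* helper and its mppe parameter are
 * hypothetical; the real code takes mppe from struct ddf_super.
 */
#if 0
static be64 example_lba_of_slot(const struct vd_config *vd,
				unsigned int mppe, unsigned int slot)
{
	const be32 *refnums = vd->phys_refnum;
	const be64 *lbas = (const be64 *)&refnums[mppe];

	return lbas[slot];	/* start LBA of this disk's extent */
}
#endif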
359
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
369 struct spare_assign {
370 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
371 be32 crc;
372 be32 timestamp;
373 __u8 reserved[7];
374 __u8 type;
375 be16 populated; /* SAEs used */
376 be16 max; /* max SAEs */
377 __u8 pad[8];
378 struct spare_assign_entry {
379 char guid[DDF_GUID_LEN];
380 be16 secondary_element;
381 __u8 pad[6];
382 } spare_ents[0];
383 };
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
391 struct disk_data {
392 be32 magic; /* DDF_PHYS_DATA_MAGIC */
393 be32 crc;
394 char guid[DDF_GUID_LEN];
395 be32 refnum; /* crc of some magic drive data ... */
396 __u8 forced_ref; /* set when above was not result of magic */
397 __u8 forced_guid; /* set if guid was forced rather than magic */
398 __u8 vendor[32];
399 __u8 pad[442];
400 };
401
402 /* bbm_section content */
403 struct bad_block_log {
404 be32 magic;
405 be32 crc;
406 be16 entry_count;
407 be32 spare_count;
408 __u8 pad[10];
409 be64 first_spare;
410 struct mapped_block {
411 be64 defective_start;
412 be32 replacement_start;
413 be16 remap_count;
414 __u8 pad[2];
415 } entries[0];
416 };
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
 435  * Note that the ddf_super has space for the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
440 struct ddf_super {
441 struct ddf_header anchor, primary, secondary;
442 struct ddf_controller_data controller;
443 struct ddf_header *active;
444 struct phys_disk *phys;
445 struct virtual_disk *virt;
446 char *conf;
447 int pdsize, vdsize;
448 unsigned int max_part, mppe, conf_rec_len;
449 int currentdev;
450 int updates_pending;
451 struct vcl {
452 union {
453 char space[512];
454 struct {
455 struct vcl *next;
456 unsigned int vcnum; /* index into ->virt */
457 struct vd_config **other_bvds;
458 __u64 *block_sizes; /* NULL if all the same */
459 };
460 };
461 struct vd_config conf;
462 } *conflist, *currentconf;
463 struct dl {
464 union {
465 char space[512];
466 struct {
467 struct dl *next;
468 int major, minor;
469 char *devname;
470 int fd;
471 unsigned long long size; /* sectors */
472 be64 primary_lba; /* sectors */
473 be64 secondary_lba; /* sectors */
474 be64 workspace_lba; /* sectors */
475 int pdnum; /* index in ->phys */
476 struct spare_assign *spare;
477 void *mdupdate; /* hold metadata update */
478
479 /* These fields used by auto-layout */
480 int raiddisk; /* slot to fill in autolayout */
481 __u64 esize;
482 };
483 };
484 struct disk_data disk;
485 struct vcl *vlist[0]; /* max_part in size */
486 } *dlist, *add_list;
487 };
488
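/* Illustrative sketch (not part of the driver) of how the lists described
 * above hang together once a container has been loaded: dlist holds one
 * entry per local disk, each pointing through vlist[] at the shared vcl
 * entries on conflist. The example_* helper is hypothetical.
 */
#if 0
static void example_walk_super(const struct ddf_super *ddf)
{
	const struct dl *d;
	const struct vcl *v;
	unsigned int i;

	for (d = ddf->dlist; d; d = d->next)		/* each physical disk */
		for (i = 0; i < ddf->max_part; i++)	/* each config slot   */
			if (d->vlist[i])
				/* d participates in virtual disk
				 * d->vlist[i]->vcnum */
				;

	for (v = ddf->conflist; v; v = v->next)
		;	/* v->conf is the (first) BVD record for v->vcnum */
}
#endif
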
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
493 #if DEBUG
494 static int all_ff(const char *guid);
495 static void pr_state(struct ddf_super *ddf, const char *msg)
496 {
497 unsigned int i;
498 dprintf("%s/%s: ", __func__, msg);
499 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
500 if (all_ff(ddf->virt->entries[i].guid))
501 continue;
502 dprintf("%u(s=%02x i=%02x) ", i,
503 ddf->virt->entries[i].state,
504 ddf->virt->entries[i].init_state);
505 }
506 dprintf("\n");
507 }
508 #else
509 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
510 #endif
511
512 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
513 {
514 ddf->updates_pending = 1;
515 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
516 pr_state(ddf, func);
517 }
518
519 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
520
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525
526 static be32 calc_crc(void *buf, int len)
527 {
528 /* crcs are always at the same place as in the ddf_header */
529 struct ddf_header *ddf = buf;
530 be32 oldcrc = ddf->crc;
531 __u32 newcrc;
532 ddf->crc = cpu_to_be32(0xffffffff);
533
534 newcrc = crc32(0, buf, len);
535 ddf->crc = oldcrc;
 536 	/* The crc is stored (like everything else) bigendian, so convert
537 * here for simplicity
538 */
539 return cpu_to_be32(newcrc);
540 }
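
/* Sketch of how calc_crc() is used by the loaders below (illustrative):
 * because the crc field is temporarily replaced with 0xffffffff while the
 * CRC is computed, verification is a plain big-endian comparison. The
 * example_* helper is hypothetical.
 */
#if 0
static int example_header_crc_ok(struct ddf_header *hdr)
{
	return be32_eq(calc_crc(hdr, 512), hdr->crc);
}
#endif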
541
542 #define DDF_INVALID_LEVEL 0xff
543 #define DDF_NO_SECONDARY 0xff
544 static int err_bad_md_layout(const mdu_array_info_t *array)
545 {
546 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
547 array->level, array->layout, array->raid_disks);
548 return -1;
549 }
550
551 static int layout_md2ddf(const mdu_array_info_t *array,
552 struct vd_config *conf)
553 {
554 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
555 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
556 __u8 sec_elmnt_count = 1;
557 __u8 srl = DDF_NO_SECONDARY;
558
559 switch (array->level) {
560 case LEVEL_LINEAR:
561 prl = DDF_CONCAT;
562 break;
563 case 0:
564 rlq = DDF_RAID0_SIMPLE;
565 prl = DDF_RAID0;
566 break;
567 case 1:
568 switch (array->raid_disks) {
569 case 2:
570 rlq = DDF_RAID1_SIMPLE;
571 break;
572 case 3:
573 rlq = DDF_RAID1_MULTI;
574 break;
575 default:
576 return err_bad_md_layout(array);
577 }
578 prl = DDF_RAID1;
579 break;
580 case 4:
581 if (array->layout != 0)
582 return err_bad_md_layout(array);
583 rlq = DDF_RAID4_N;
584 prl = DDF_RAID4;
585 break;
586 case 5:
587 switch (array->layout) {
588 case ALGORITHM_LEFT_ASYMMETRIC:
589 rlq = DDF_RAID5_N_RESTART;
590 break;
591 case ALGORITHM_RIGHT_ASYMMETRIC:
592 rlq = DDF_RAID5_0_RESTART;
593 break;
594 case ALGORITHM_LEFT_SYMMETRIC:
595 rlq = DDF_RAID5_N_CONTINUE;
596 break;
597 case ALGORITHM_RIGHT_SYMMETRIC:
598 /* not mentioned in standard */
599 default:
600 return err_bad_md_layout(array);
601 }
602 prl = DDF_RAID5;
603 break;
604 case 6:
605 switch (array->layout) {
606 case ALGORITHM_ROTATING_N_RESTART:
607 rlq = DDF_RAID5_N_RESTART;
608 break;
609 case ALGORITHM_ROTATING_ZERO_RESTART:
610 rlq = DDF_RAID6_0_RESTART;
611 break;
612 case ALGORITHM_ROTATING_N_CONTINUE:
613 rlq = DDF_RAID5_N_CONTINUE;
614 break;
615 default:
616 return err_bad_md_layout(array);
617 }
618 prl = DDF_RAID6;
619 break;
620 case 10:
621 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
622 rlq = DDF_RAID1_SIMPLE;
623 prim_elmnt_count = cpu_to_be16(2);
624 sec_elmnt_count = array->raid_disks / 2;
625 } else if (array->raid_disks % 3 == 0
626 && array->layout == 0x103) {
627 rlq = DDF_RAID1_MULTI;
628 prim_elmnt_count = cpu_to_be16(3);
629 sec_elmnt_count = array->raid_disks / 3;
630 } else
631 return err_bad_md_layout(array);
632 srl = DDF_2SPANNED;
633 prl = DDF_RAID1;
634 break;
635 default:
636 return err_bad_md_layout(array);
637 }
638 conf->prl = prl;
639 conf->prim_elmnt_count = prim_elmnt_count;
640 conf->rlq = rlq;
641 conf->srl = srl;
642 conf->sec_elmnt_count = sec_elmnt_count;
643 return 0;
644 }
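
/* Worked example (illustrative) for the mapping above: an md RAID10 array
 * with 4 disks and layout 0x102 (two "near" copies) becomes a DDF RAID1
 * primary level spanned across two secondary elements. The expected values
 * noted in the comment follow directly from the switch statement above; the
 * example_* helper is hypothetical.
 */
#if 0
static void example_layout_md2ddf(void)
{
	mdu_array_info_t array = {
		.level = 10,
		.layout = 0x102,	/* near=2 */
		.raid_disks = 4,
	};
	struct vd_config conf;

	if (layout_md2ddf(&array, &conf) == 0) {
		/* expected result:
		 *   conf.prl              == DDF_RAID1
		 *   conf.rlq              == DDF_RAID1_SIMPLE
		 *   conf.prim_elmnt_count == cpu_to_be16(2)
		 *   conf.sec_elmnt_count  == 2
		 *   conf.srl              == DDF_2SPANNED
		 */
	}
}
#endif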
645
646 static int err_bad_ddf_layout(const struct vd_config *conf)
647 {
648 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
649 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
650 return -1;
651 }
652
653 static int layout_ddf2md(const struct vd_config *conf,
654 mdu_array_info_t *array)
655 {
656 int level = LEVEL_UNSUPPORTED;
657 int layout = 0;
658 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
659
660 if (conf->sec_elmnt_count > 1) {
661 /* see also check_secondary() */
662 if (conf->prl != DDF_RAID1 ||
663 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
664 pr_err("Unsupported secondary RAID level %u/%u\n",
665 conf->prl, conf->srl);
666 return -1;
667 }
668 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
669 layout = 0x102;
670 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
671 layout = 0x103;
672 else
673 return err_bad_ddf_layout(conf);
674 raiddisks *= conf->sec_elmnt_count;
675 level = 10;
676 goto good;
677 }
678
679 switch (conf->prl) {
680 case DDF_CONCAT:
681 level = LEVEL_LINEAR;
682 break;
683 case DDF_RAID0:
684 if (conf->rlq != DDF_RAID0_SIMPLE)
685 return err_bad_ddf_layout(conf);
686 level = 0;
687 break;
688 case DDF_RAID1:
689 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
690 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
691 return err_bad_ddf_layout(conf);
692 level = 1;
693 break;
694 case DDF_RAID4:
695 if (conf->rlq != DDF_RAID4_N)
696 return err_bad_ddf_layout(conf);
697 level = 4;
698 break;
699 case DDF_RAID5:
700 switch (conf->rlq) {
701 case DDF_RAID5_N_RESTART:
702 layout = ALGORITHM_LEFT_ASYMMETRIC;
703 break;
704 case DDF_RAID5_0_RESTART:
705 layout = ALGORITHM_RIGHT_ASYMMETRIC;
706 break;
707 case DDF_RAID5_N_CONTINUE:
708 layout = ALGORITHM_LEFT_SYMMETRIC;
709 break;
710 default:
711 return err_bad_ddf_layout(conf);
712 }
713 level = 5;
714 break;
715 case DDF_RAID6:
716 switch (conf->rlq) {
717 case DDF_RAID5_N_RESTART:
718 layout = ALGORITHM_ROTATING_N_RESTART;
719 break;
720 case DDF_RAID6_0_RESTART:
721 layout = ALGORITHM_ROTATING_ZERO_RESTART;
722 break;
723 case DDF_RAID5_N_CONTINUE:
724 layout = ALGORITHM_ROTATING_N_CONTINUE;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 }
729 level = 6;
730 break;
731 default:
732 return err_bad_ddf_layout(conf);
733 };
734
735 good:
736 array->level = level;
737 array->layout = layout;
738 array->raid_disks = raiddisks;
739 return 0;
740 }
741
742 static int load_ddf_header(int fd, unsigned long long lba,
743 unsigned long long size,
744 int type,
745 struct ddf_header *hdr, struct ddf_header *anchor)
746 {
747 /* read a ddf header (primary or secondary) from fd/lba
748 * and check that it is consistent with anchor
749 * Need to check:
 750 	 * magic, crc, guid, rev, the LBAs and header type, and
751 * everything after header_type must be the same
752 */
753 if (lba >= size-1)
754 return 0;
755
756 if (lseek64(fd, lba<<9, 0) < 0)
757 return 0;
758
759 if (read(fd, hdr, 512) != 512)
760 return 0;
761
762 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
763 pr_err("%s: bad header magic\n", __func__);
764 return 0;
765 }
766 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
767 pr_err("%s: bad CRC\n", __func__);
768 return 0;
769 }
770 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
771 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
772 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
773 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
774 hdr->type != type ||
775 memcmp(anchor->pad2, hdr->pad2, 512 -
776 offsetof(struct ddf_header, pad2)) != 0) {
777 pr_err("%s: header mismatch\n", __func__);
778 return 0;
779 }
780
781 /* Looks good enough to me... */
782 return 1;
783 }
784
785 static void *load_section(int fd, struct ddf_super *super, void *buf,
786 be32 offset_be, be32 len_be, int check)
787 {
788 unsigned long long offset = be32_to_cpu(offset_be);
789 unsigned long long len = be32_to_cpu(len_be);
790 int dofree = (buf == NULL);
791
792 if (check)
793 if (len != 2 && len != 8 && len != 32
794 && len != 128 && len != 512)
795 return NULL;
796
797 if (len > 1024)
798 return NULL;
799 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
800 buf = NULL;
801
802 if (!buf)
803 return NULL;
804
805 if (super->active->type == 1)
806 offset += be64_to_cpu(super->active->primary_lba);
807 else
808 offset += be64_to_cpu(super->active->secondary_lba);
809
810 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
811 if (dofree)
812 free(buf);
813 return NULL;
814 }
815 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
816 if (dofree)
817 free(buf);
818 return NULL;
819 }
820 return buf;
821 }
822
823 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
824 {
825 unsigned long long dsize;
826
827 get_dev_size(fd, NULL, &dsize);
828
829 if (lseek64(fd, dsize-512, 0) < 0) {
830 if (devname)
831 pr_err("Cannot seek to anchor block on %s: %s\n",
832 devname, strerror(errno));
833 return 1;
834 }
835 if (read(fd, &super->anchor, 512) != 512) {
836 if (devname)
837 pr_err("Cannot read anchor block on %s: %s\n",
838 devname, strerror(errno));
839 return 1;
840 }
841 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
842 if (devname)
843 pr_err("no DDF anchor found on %s\n",
844 devname);
845 return 2;
846 }
847 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
848 if (devname)
849 pr_err("bad CRC on anchor on %s\n",
850 devname);
851 return 2;
852 }
853 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
854 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
855 if (devname)
856 pr_err("can only support super revision"
857 " %.8s and earlier, not %.8s on %s\n",
858 DDF_REVISION_2, super->anchor.revision,devname);
859 return 2;
860 }
861 super->active = NULL;
862 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
863 dsize >> 9, 1,
864 &super->primary, &super->anchor) == 0) {
865 if (devname)
866 pr_err("Failed to load primary DDF header "
867 "on %s\n", devname);
868 } else
869 super->active = &super->primary;
870
871 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
872 dsize >> 9, 2,
873 &super->secondary, &super->anchor)) {
874 if (super->active == NULL
875 || (be32_to_cpu(super->primary.seq)
876 < be32_to_cpu(super->secondary.seq) &&
877 !super->secondary.openflag)
878 || (be32_to_cpu(super->primary.seq)
879 == be32_to_cpu(super->secondary.seq) &&
880 super->primary.openflag && !super->secondary.openflag)
881 )
882 super->active = &super->secondary;
883 } else if (devname &&
884 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
885 pr_err("Failed to load secondary DDF header on %s\n",
886 devname);
887 if (super->active == NULL)
888 return 2;
889 return 0;
890 }
891
892 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
893 {
894 void *ok;
895 ok = load_section(fd, super, &super->controller,
896 super->active->controller_section_offset,
897 super->active->controller_section_length,
898 0);
899 super->phys = load_section(fd, super, NULL,
900 super->active->phys_section_offset,
901 super->active->phys_section_length,
902 1);
903 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
904
905 super->virt = load_section(fd, super, NULL,
906 super->active->virt_section_offset,
907 super->active->virt_section_length,
908 1);
909 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
910 if (!ok ||
911 !super->phys ||
912 !super->virt) {
913 free(super->phys);
914 free(super->virt);
915 super->phys = NULL;
916 super->virt = NULL;
917 return 2;
918 }
919 super->conflist = NULL;
920 super->dlist = NULL;
921
922 super->max_part = be16_to_cpu(super->active->max_partitions);
923 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
924 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
925 return 0;
926 }
927
928 #define DDF_UNUSED_BVD 0xff
929 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
930 {
931 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
932 unsigned int i, vdsize;
933 void *p;
934 if (n_vds == 0) {
935 vcl->other_bvds = NULL;
936 return 0;
937 }
938 vdsize = ddf->conf_rec_len * 512;
939 if (posix_memalign(&p, 512, n_vds *
940 (vdsize + sizeof(struct vd_config *))) != 0)
941 return -1;
942 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
943 for (i = 0; i < n_vds; i++) {
944 vcl->other_bvds[i] = p + i * vdsize;
945 memset(vcl->other_bvds[i], 0, vdsize);
946 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
947 }
948 return 0;
949 }
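
/* Memory layout produced by alloc_other_bvds() above (descriptive note):
 *
 *   p -> [ conf 0 ][ conf 1 ] ... [ conf n-1 ][ ptr 0 ][ ptr 1 ] ... [ ptr n-1 ]
 *        \---------- n_vds * vdsize ---------/\------- n_vds pointers --------/
 *
 * other_bvds is the pointer array at the tail of the single allocation and
 * other_bvds[i] points at conf i, which is why free_super_ddf() releases the
 * whole buffer by calling free(v->other_bvds[0]).
 */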
950
951 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
952 unsigned int len)
953 {
954 int i;
955 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
956 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
957 break;
958
959 if (i < vcl->conf.sec_elmnt_count-1) {
960 if (be32_to_cpu(vd->seqnum) <=
961 be32_to_cpu(vcl->other_bvds[i]->seqnum))
962 return;
963 } else {
964 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
965 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
966 break;
967 if (i == vcl->conf.sec_elmnt_count-1) {
968 pr_err("no space for sec level config %u, count is %u\n",
969 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
970 return;
971 }
972 }
973 memcpy(vcl->other_bvds[i], vd, len);
974 }
975
976 static int load_ddf_local(int fd, struct ddf_super *super,
977 char *devname, int keep)
978 {
979 struct dl *dl;
980 struct stat stb;
981 char *conf;
982 unsigned int i;
983 unsigned int confsec;
984 int vnum;
985 unsigned int max_virt_disks = be16_to_cpu
986 (super->active->max_vd_entries);
987 unsigned long long dsize;
988
989 /* First the local disk info */
990 if (posix_memalign((void**)&dl, 512,
991 sizeof(*dl) +
992 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
993 pr_err("%s could not allocate disk info buffer\n",
994 __func__);
995 return 1;
996 }
997
998 load_section(fd, super, &dl->disk,
999 super->active->data_section_offset,
1000 super->active->data_section_length,
1001 0);
1002 dl->devname = devname ? xstrdup(devname) : NULL;
1003
1004 fstat(fd, &stb);
1005 dl->major = major(stb.st_rdev);
1006 dl->minor = minor(stb.st_rdev);
1007 dl->next = super->dlist;
1008 dl->fd = keep ? fd : -1;
1009
1010 dl->size = 0;
1011 if (get_dev_size(fd, devname, &dsize))
1012 dl->size = dsize >> 9;
1013 /* If the disks have different sizes, the LBAs will differ
1014 * between phys disks.
1015 * At this point here, the values in super->active must be valid
1016 * for this phys disk. */
1017 dl->primary_lba = super->active->primary_lba;
1018 dl->secondary_lba = super->active->secondary_lba;
1019 dl->workspace_lba = super->active->workspace_lba;
1020 dl->spare = NULL;
1021 for (i = 0 ; i < super->max_part ; i++)
1022 dl->vlist[i] = NULL;
1023 super->dlist = dl;
1024 dl->pdnum = -1;
1025 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1026 if (memcmp(super->phys->entries[i].guid,
1027 dl->disk.guid, DDF_GUID_LEN) == 0)
1028 dl->pdnum = i;
1029
1030 /* Now the config list. */
1031 /* 'conf' is an array of config entries, some of which are
1032 * probably invalid. Those which are good need to be copied into
1033 * the conflist
1034 */
1035
1036 conf = load_section(fd, super, super->conf,
1037 super->active->config_section_offset,
1038 super->active->config_section_length,
1039 0);
1040 super->conf = conf;
1041 vnum = 0;
1042 for (confsec = 0;
1043 confsec < be32_to_cpu(super->active->config_section_length);
1044 confsec += super->conf_rec_len) {
1045 struct vd_config *vd =
1046 (struct vd_config *)((char*)conf + confsec*512);
1047 struct vcl *vcl;
1048
1049 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1050 if (dl->spare)
1051 continue;
1052 if (posix_memalign((void**)&dl->spare, 512,
1053 super->conf_rec_len*512) != 0) {
1054 pr_err("%s could not allocate spare info buf\n",
1055 __func__);
1056 return 1;
1057 }
1058
1059 memcpy(dl->spare, vd, super->conf_rec_len*512);
1060 continue;
1061 }
1062 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1063 continue;
1064 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1065 if (memcmp(vcl->conf.guid,
1066 vd->guid, DDF_GUID_LEN) == 0)
1067 break;
1068 }
1069
1070 if (vcl) {
1071 dl->vlist[vnum++] = vcl;
1072 if (vcl->other_bvds != NULL &&
1073 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1074 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1075 continue;
1076 }
1077 if (be32_to_cpu(vd->seqnum) <=
1078 be32_to_cpu(vcl->conf.seqnum))
1079 continue;
1080 } else {
1081 if (posix_memalign((void**)&vcl, 512,
1082 (super->conf_rec_len*512 +
1083 offsetof(struct vcl, conf))) != 0) {
1084 pr_err("%s could not allocate vcl buf\n",
1085 __func__);
1086 return 1;
1087 }
1088 vcl->next = super->conflist;
1089 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1090 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1091 if (alloc_other_bvds(super, vcl) != 0) {
1092 pr_err("%s could not allocate other bvds\n",
1093 __func__);
1094 free(vcl);
1095 return 1;
1096 };
1097 super->conflist = vcl;
1098 dl->vlist[vnum++] = vcl;
1099 }
1100 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1101 for (i=0; i < max_virt_disks ; i++)
1102 if (memcmp(super->virt->entries[i].guid,
1103 vcl->conf.guid, DDF_GUID_LEN)==0)
1104 break;
1105 if (i < max_virt_disks)
1106 vcl->vcnum = i;
1107 }
1108
1109 return 0;
1110 }
1111
1112 #ifndef MDASSEMBLE
1113 static int load_super_ddf_all(struct supertype *st, int fd,
1114 void **sbp, char *devname);
1115 #endif
1116
1117 static void free_super_ddf(struct supertype *st);
1118
1119 static int load_super_ddf(struct supertype *st, int fd,
1120 char *devname)
1121 {
1122 unsigned long long dsize;
1123 struct ddf_super *super;
1124 int rv;
1125
1126 if (get_dev_size(fd, devname, &dsize) == 0)
1127 return 1;
1128
1129 if (!st->ignore_hw_compat && test_partition(fd))
1130 /* DDF is not allowed on partitions */
1131 return 1;
1132
1133 /* 32M is a lower bound */
1134 if (dsize <= 32*1024*1024) {
1135 if (devname)
1136 pr_err("%s is too small for ddf: "
1137 "size is %llu sectors.\n",
1138 devname, dsize>>9);
1139 return 1;
1140 }
1141 if (dsize & 511) {
1142 if (devname)
1143 pr_err("%s is an odd size for ddf: "
1144 "size is %llu bytes.\n",
1145 devname, dsize);
1146 return 1;
1147 }
1148
1149 free_super_ddf(st);
1150
1151 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1152 pr_err("malloc of %zu failed.\n",
1153 sizeof(*super));
1154 return 1;
1155 }
1156 memset(super, 0, sizeof(*super));
1157
1158 rv = load_ddf_headers(fd, super, devname);
1159 if (rv) {
1160 free(super);
1161 return rv;
1162 }
1163
1164 /* Have valid headers and have chosen the best. Let's read in the rest*/
1165
1166 rv = load_ddf_global(fd, super, devname);
1167
1168 if (rv) {
1169 if (devname)
1170 pr_err("Failed to load all information "
1171 "sections on %s\n", devname);
1172 free(super);
1173 return rv;
1174 }
1175
1176 rv = load_ddf_local(fd, super, devname, 0);
1177
1178 if (rv) {
1179 if (devname)
1180 pr_err("Failed to load all information "
1181 "sections on %s\n", devname);
1182 free(super);
1183 return rv;
1184 }
1185
1186 /* Should possibly check the sections .... */
1187
1188 st->sb = super;
1189 if (st->ss == NULL) {
1190 st->ss = &super_ddf;
1191 st->minor_version = 0;
1192 st->max_devs = 512;
1193 }
1194 return 0;
1195
1196 }
1197
1198 static void free_super_ddf(struct supertype *st)
1199 {
1200 struct ddf_super *ddf = st->sb;
1201 if (ddf == NULL)
1202 return;
1203 free(ddf->phys);
1204 free(ddf->virt);
1205 free(ddf->conf);
1206 while (ddf->conflist) {
1207 struct vcl *v = ddf->conflist;
1208 ddf->conflist = v->next;
1209 if (v->block_sizes)
1210 free(v->block_sizes);
1211 if (v->other_bvds)
1212 /*
1213 v->other_bvds[0] points to beginning of buffer,
1214 see alloc_other_bvds()
1215 */
1216 free(v->other_bvds[0]);
1217 free(v);
1218 }
1219 while (ddf->dlist) {
1220 struct dl *d = ddf->dlist;
1221 ddf->dlist = d->next;
1222 if (d->fd >= 0)
1223 close(d->fd);
1224 if (d->spare)
1225 free(d->spare);
1226 free(d);
1227 }
1228 while (ddf->add_list) {
1229 struct dl *d = ddf->add_list;
1230 ddf->add_list = d->next;
1231 if (d->fd >= 0)
1232 close(d->fd);
1233 if (d->spare)
1234 free(d->spare);
1235 free(d);
1236 }
1237 free(ddf);
1238 st->sb = NULL;
1239 }
1240
1241 static struct supertype *match_metadata_desc_ddf(char *arg)
1242 {
 1243 	/* 'ddf' only supports containers */
1244 struct supertype *st;
1245 if (strcmp(arg, "ddf") != 0 &&
1246 strcmp(arg, "default") != 0
1247 )
1248 return NULL;
1249
1250 st = xcalloc(1, sizeof(*st));
1251 st->ss = &super_ddf;
1252 st->max_devs = 512;
1253 st->minor_version = 0;
1254 st->sb = NULL;
1255 return st;
1256 }
1257
1258 #ifndef MDASSEMBLE
1259
1260 static mapping_t ddf_state[] = {
1261 { "Optimal", 0},
1262 { "Degraded", 1},
1263 { "Deleted", 2},
1264 { "Missing", 3},
1265 { "Failed", 4},
1266 { "Partially Optimal", 5},
1267 { "-reserved-", 6},
1268 { "-reserved-", 7},
1269 { NULL, 0}
1270 };
1271
1272 static mapping_t ddf_init_state[] = {
1273 { "Not Initialised", 0},
1274 { "QuickInit in Progress", 1},
1275 { "Fully Initialised", 2},
1276 { "*UNKNOWN*", 3},
1277 { NULL, 0}
1278 };
1279 static mapping_t ddf_access[] = {
1280 { "Read/Write", 0},
1281 { "Reserved", 1},
1282 { "Read Only", 2},
1283 { "Blocked (no access)", 3},
1284 { NULL ,0}
1285 };
1286
1287 static mapping_t ddf_level[] = {
1288 { "RAID0", DDF_RAID0},
1289 { "RAID1", DDF_RAID1},
1290 { "RAID3", DDF_RAID3},
1291 { "RAID4", DDF_RAID4},
1292 { "RAID5", DDF_RAID5},
1293 { "RAID1E",DDF_RAID1E},
1294 { "JBOD", DDF_JBOD},
1295 { "CONCAT",DDF_CONCAT},
1296 { "RAID5E",DDF_RAID5E},
1297 { "RAID5EE",DDF_RAID5EE},
1298 { "RAID6", DDF_RAID6},
1299 { NULL, 0}
1300 };
1301 static mapping_t ddf_sec_level[] = {
1302 { "Striped", DDF_2STRIPED},
1303 { "Mirrored", DDF_2MIRRORED},
1304 { "Concat", DDF_2CONCAT},
1305 { "Spanned", DDF_2SPANNED},
1306 { NULL, 0}
1307 };
1308 #endif
1309
1310 static int all_ff(const char *guid)
1311 {
1312 int i;
1313 for (i = 0; i < DDF_GUID_LEN; i++)
1314 if (guid[i] != (char)0xff)
1315 return 0;
1316 return 1;
1317 }
1318
1319 static const char *guid_str(const char *guid)
1320 {
1321 static char buf[DDF_GUID_LEN*2+1];
1322 int i;
1323 char *p = buf;
1324 for (i = 0; i < DDF_GUID_LEN; i++) {
1325 unsigned char c = guid[i];
1326 if (c >= 32 && c < 127)
1327 p += sprintf(p, "%c", c);
1328 else
1329 p += sprintf(p, "%02x", c);
1330 }
1331 *p = '\0';
1332 return (const char *) buf;
1333 }
1334
1335 #ifndef MDASSEMBLE
1336 static void print_guid(char *guid, int tstamp)
1337 {
 1338 	/* DDF GUIDs are part (or all) ASCII and part binary.
1339 * They tend to be space padded.
1340 * We print the GUID in HEX, then in parentheses add
1341 * any initial ASCII sequence, and a possible
1342 * time stamp from bytes 16-19
1343 */
1344 int l = DDF_GUID_LEN;
1345 int i;
1346
1347 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1348 if ((i&3)==0 && i != 0) printf(":");
1349 printf("%02X", guid[i]&255);
1350 }
1351
1352 printf("\n (");
1353 while (l && guid[l-1] == ' ')
1354 l--;
1355 for (i=0 ; i<l ; i++) {
1356 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1357 fputc(guid[i], stdout);
1358 else
1359 break;
1360 }
1361 if (tstamp) {
1362 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1363 char tbuf[100];
1364 struct tm *tm;
1365 tm = localtime(&then);
1366 strftime(tbuf, 100, " %D %T",tm);
1367 fputs(tbuf, stdout);
1368 }
1369 printf(")");
1370 }
1371
1372 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1373 {
1374 int crl = sb->conf_rec_len;
1375 struct vcl *vcl;
1376
1377 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1378 unsigned int i;
1379 struct vd_config *vc = &vcl->conf;
1380
1381 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1382 continue;
1383 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1384 continue;
1385
1386 /* Ok, we know about this VD, let's give more details */
1387 printf(" Raid Devices[%d] : %d (", n,
1388 be16_to_cpu(vc->prim_elmnt_count));
1389 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1390 int j;
1391 int cnt = be16_to_cpu(sb->phys->used_pdes);
1392 for (j=0; j<cnt; j++)
1393 if (be32_eq(vc->phys_refnum[i],
1394 sb->phys->entries[j].refnum))
1395 break;
1396 if (i) printf(" ");
1397 if (j < cnt)
1398 printf("%d", j);
1399 else
1400 printf("--");
1401 }
1402 printf(")\n");
1403 if (vc->chunk_shift != 255)
1404 printf(" Chunk Size[%d] : %d sectors\n", n,
1405 1 << vc->chunk_shift);
1406 printf(" Raid Level[%d] : %s\n", n,
1407 map_num(ddf_level, vc->prl)?:"-unknown-");
1408 if (vc->sec_elmnt_count != 1) {
1409 printf(" Secondary Position[%d] : %d of %d\n", n,
1410 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1411 printf(" Secondary Level[%d] : %s\n", n,
1412 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1413 }
1414 printf(" Device Size[%d] : %llu\n", n,
1415 be64_to_cpu(vc->blocks)/2);
1416 printf(" Array Size[%d] : %llu\n", n,
1417 be64_to_cpu(vc->array_blocks)/2);
1418 }
1419 }
1420
1421 static void examine_vds(struct ddf_super *sb)
1422 {
1423 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1424 unsigned int i;
1425 printf(" Virtual Disks : %d\n", cnt);
1426
1427 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1428 struct virtual_entry *ve = &sb->virt->entries[i];
1429 if (all_ff(ve->guid))
1430 continue;
1431 printf("\n");
1432 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1433 printf("\n");
1434 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1435 printf(" state[%d] : %s, %s%s\n", i,
1436 map_num(ddf_state, ve->state & 7),
1437 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1438 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1439 printf(" init state[%d] : %s\n", i,
1440 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1441 printf(" access[%d] : %s\n", i,
1442 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1443 printf(" Name[%d] : %.16s\n", i, ve->name);
1444 examine_vd(i, sb, ve->guid);
1445 }
1446 if (cnt) printf("\n");
1447 }
1448
1449 static void examine_pds(struct ddf_super *sb)
1450 {
1451 int cnt = be16_to_cpu(sb->phys->used_pdes);
1452 int i;
1453 struct dl *dl;
1454 printf(" Physical Disks : %d\n", cnt);
1455 printf(" Number RefNo Size Device Type/State\n");
1456
1457 for (i=0 ; i<cnt ; i++) {
1458 struct phys_disk_entry *pd = &sb->phys->entries[i];
1459 int type = be16_to_cpu(pd->type);
1460 int state = be16_to_cpu(pd->state);
1461
1462 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1463 //printf("\n");
1464 printf(" %3d %08x ", i,
1465 be32_to_cpu(pd->refnum));
1466 printf("%8lluK ",
1467 be64_to_cpu(pd->config_size)>>1);
1468 for (dl = sb->dlist; dl ; dl = dl->next) {
1469 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1470 char *dv = map_dev(dl->major, dl->minor, 0);
1471 if (dv) {
1472 printf("%-15s", dv);
1473 break;
1474 }
1475 }
1476 }
1477 if (!dl)
1478 printf("%15s","");
1479 printf(" %s%s%s%s%s",
1480 (type&2) ? "active":"",
1481 (type&4) ? "Global-Spare":"",
1482 (type&8) ? "spare" : "",
1483 (type&16)? ", foreign" : "",
1484 (type&32)? "pass-through" : "");
1485 if (state & DDF_Failed)
1486 /* This over-rides these three */
1487 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1488 printf("/%s%s%s%s%s%s%s",
1489 (state&1)? "Online": "Offline",
1490 (state&2)? ", Failed": "",
1491 (state&4)? ", Rebuilding": "",
1492 (state&8)? ", in-transition": "",
1493 (state&16)? ", SMART-errors": "",
1494 (state&32)? ", Unrecovered-Read-Errors": "",
1495 (state&64)? ", Missing" : "");
1496 printf("\n");
1497 }
1498 }
1499
1500 static void examine_super_ddf(struct supertype *st, char *homehost)
1501 {
1502 struct ddf_super *sb = st->sb;
1503
1504 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1505 printf(" Version : %.8s\n", sb->anchor.revision);
1506 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1507 printf("\n");
1508 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1509 printf("\n");
1510 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1511 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1512 DDF_HEADER_MAGIC)
1513 ?"yes" : "no");
1514 examine_vds(sb);
1515 examine_pds(sb);
1516 }
1517
1518 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1519
1520 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1521 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1522 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
1523
1524 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1525 {
1526 /*
1527 * Figure out the VD number for this supertype.
1528 * Returns DDF_CONTAINER for the container itself,
1529 * and DDF_NOTFOUND on error.
1530 */
1531 struct ddf_super *ddf = st->sb;
1532 struct mdinfo *sra;
1533 char *sub, *end;
1534 unsigned int vcnum;
1535
1536 if (*st->container_devnm == '\0')
1537 return DDF_CONTAINER;
1538
1539 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1540 if (!sra || sra->array.major_version != -1 ||
1541 sra->array.minor_version != -2 ||
1542 !is_subarray(sra->text_version))
1543 return DDF_NOTFOUND;
1544
1545 sub = strchr(sra->text_version + 1, '/');
1546 if (sub != NULL)
1547 vcnum = strtoul(sub + 1, &end, 10);
1548 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1549 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1550 return DDF_NOTFOUND;
1551
1552 return vcnum;
1553 }
1554
1555 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1556 {
1557 /* We just write a generic DDF ARRAY entry
1558 */
1559 struct mdinfo info;
1560 char nbuf[64];
1561 getinfo_super_ddf(st, &info, NULL);
1562 fname_from_uuid(st, &info, nbuf, ':');
1563
1564 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1565 }
1566
1567 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1568 {
1569 /* We just write a generic DDF ARRAY entry
1570 */
1571 struct ddf_super *ddf = st->sb;
1572 struct mdinfo info;
1573 unsigned int i;
1574 char nbuf[64];
1575 getinfo_super_ddf(st, &info, NULL);
1576 fname_from_uuid(st, &info, nbuf, ':');
1577
1578 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1579 struct virtual_entry *ve = &ddf->virt->entries[i];
1580 struct vcl vcl;
1581 char nbuf1[64];
1582 char namebuf[17];
1583 if (all_ff(ve->guid))
1584 continue;
1585 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1586 ddf->currentconf =&vcl;
1587 vcl.vcnum = i;
1588 uuid_from_super_ddf(st, info.uuid);
1589 fname_from_uuid(st, &info, nbuf1, ':');
1590 _ddf_array_name(namebuf, ddf, i);
1591 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1592 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1593 nbuf+5, i, nbuf1+5);
1594 }
1595 }
1596
1597 static void export_examine_super_ddf(struct supertype *st)
1598 {
1599 struct mdinfo info;
1600 char nbuf[64];
1601 getinfo_super_ddf(st, &info, NULL);
1602 fname_from_uuid(st, &info, nbuf, ':');
1603 printf("MD_METADATA=ddf\n");
1604 printf("MD_LEVEL=container\n");
1605 printf("MD_UUID=%s\n", nbuf+5);
1606 printf("MD_DEVICES=%u\n",
1607 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1608 }
1609
1610 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1611 {
1612 void *buf;
1613 unsigned long long dsize, offset;
1614 int bytes;
1615 struct ddf_header *ddf;
1616 int written = 0;
1617
1618 /* The meta consists of an anchor, a primary, and a secondary.
1619 * This all lives at the end of the device.
1620 * So it is easiest to find the earliest of primary and
1621 * secondary, and copy everything from there.
1622 *
 1623 	 * The anchor is 512 bytes from the end. It contains primary_lba and
 1624 	 * secondary_lba; we copy from whichever of those comes first.
1625 */
1626
1627 if (posix_memalign(&buf, 4096, 4096) != 0)
1628 return 1;
1629
1630 if (!get_dev_size(from, NULL, &dsize))
1631 goto err;
1632
1633 if (lseek64(from, dsize-512, 0) < 0)
1634 goto err;
1635 if (read(from, buf, 512) != 512)
1636 goto err;
1637 ddf = buf;
1638 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1639 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1640 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1641 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1642 goto err;
1643
1644 offset = dsize - 512;
1645 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1646 offset = be64_to_cpu(ddf->primary_lba) << 9;
1647 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1648 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1649
1650 bytes = dsize - offset;
1651
1652 if (lseek64(from, offset, 0) < 0 ||
1653 lseek64(to, offset, 0) < 0)
1654 goto err;
1655 while (written < bytes) {
1656 int n = bytes - written;
1657 if (n > 4096)
1658 n = 4096;
1659 if (read(from, buf, n) != n)
1660 goto err;
1661 if (write(to, buf, n) != n)
1662 goto err;
1663 written += n;
1664 }
1665 free(buf);
1666 return 0;
1667 err:
1668 free(buf);
1669 return 1;
1670 }
1671
1672 static void detail_super_ddf(struct supertype *st, char *homehost)
1673 {
1674 /* FIXME later
1675 * Could print DDF GUID
1676 * Need to find which array
1677 * If whole, briefly list all arrays
1678 * If one, give name
1679 */
1680 }
1681
1682 static const char *vendors_with_variable_volume_UUID[] = {
1683 "LSI ",
1684 };
1685
1686 static int volume_id_is_reliable(const struct ddf_super *ddf)
1687 {
1688 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1689 int i;
1690 for (i = 0; i < n; i++)
1691 if (!memcmp(ddf->controller.guid,
1692 vendors_with_variable_volume_UUID[i], 8))
1693 return 0;
1694 return 1;
1695 }
1696
1697 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1698 unsigned int vcnum, int uuid[4])
1699 {
1700 char buf[DDF_GUID_LEN+18], sha[20], *p;
1701 struct sha1_ctx ctx;
1702 if (volume_id_is_reliable(ddf)) {
1703 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1704 return;
1705 }
1706 /*
1707 * Some fake RAID BIOSes (in particular, LSI ones) change the
1708 * VD GUID at every boot. These GUIDs are not suitable for
1709 * identifying an array. Luckily the header GUID appears to
1710 * remain constant.
1711 * We construct a pseudo-UUID from the header GUID and those
1712 * properties of the subarray that we expect to remain constant.
1713 */
1714 memset(buf, 0, sizeof(buf));
1715 p = buf;
1716 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1717 p += DDF_GUID_LEN;
1718 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1719 p += 16;
1720 *((__u16 *) p) = vcnum;
1721 sha1_init_ctx(&ctx);
1722 sha1_process_bytes(buf, sizeof(buf), &ctx);
1723 sha1_finish_ctx(&ctx, sha);
1724 memcpy(uuid, sha, 4*4);
1725 }
1726
1727 static void brief_detail_super_ddf(struct supertype *st)
1728 {
1729 struct mdinfo info;
1730 char nbuf[64];
1731 struct ddf_super *ddf = st->sb;
1732 unsigned int vcnum = get_vd_num_of_subarray(st);
1733 if (vcnum == DDF_CONTAINER)
1734 uuid_from_super_ddf(st, info.uuid);
1735 else if (vcnum == DDF_NOTFOUND)
1736 return;
1737 else
1738 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1739 fname_from_uuid(st, &info, nbuf,':');
1740 printf(" UUID=%s", nbuf + 5);
1741 }
1742 #endif
1743
1744 static int match_home_ddf(struct supertype *st, char *homehost)
1745 {
1746 /* It matches 'this' host if the controller is a
1747 * Linux-MD controller with vendor_data matching
1748 * the hostname
1749 */
1750 struct ddf_super *ddf = st->sb;
1751 unsigned int len;
1752
1753 if (!homehost)
1754 return 0;
1755 len = strlen(homehost);
1756
1757 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1758 len < sizeof(ddf->controller.vendor_data) &&
1759 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1760 ddf->controller.vendor_data[len] == 0);
1761 }
1762
1763 #ifndef MDASSEMBLE
1764 static int find_index_in_bvd(const struct ddf_super *ddf,
1765 const struct vd_config *conf, unsigned int n,
1766 unsigned int *n_bvd)
1767 {
1768 /*
1769 * Find the index of the n-th valid physical disk in this BVD
1770 */
1771 unsigned int i, j;
1772 for (i = 0, j = 0; i < ddf->mppe &&
1773 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1774 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1775 if (n == j) {
1776 *n_bvd = i;
1777 return 1;
1778 }
1779 j++;
1780 }
1781 }
1782 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1783 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1784 return 0;
1785 }
1786
1787 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1788 unsigned int n,
1789 unsigned int *n_bvd, struct vcl **vcl)
1790 {
1791 struct vcl *v;
1792
1793 for (v = ddf->conflist; v; v = v->next) {
1794 unsigned int nsec, ibvd = 0;
1795 struct vd_config *conf;
1796 if (inst != v->vcnum)
1797 continue;
1798 conf = &v->conf;
1799 if (conf->sec_elmnt_count == 1) {
1800 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1801 *vcl = v;
1802 return conf;
1803 } else
1804 goto bad;
1805 }
1806 if (v->other_bvds == NULL) {
1807 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1808 __func__, conf->sec_elmnt_count);
1809 goto bad;
1810 }
1811 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1812 if (conf->sec_elmnt_seq != nsec) {
1813 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1814 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1815 == nsec)
1816 break;
1817 }
1818 if (ibvd == conf->sec_elmnt_count)
1819 goto bad;
1820 conf = v->other_bvds[ibvd-1];
1821 }
1822 if (!find_index_in_bvd(ddf, conf,
1823 n - nsec*conf->sec_elmnt_count, n_bvd))
1824 goto bad;
1825 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1826 , __func__, n, *n_bvd, ibvd, inst);
1827 *vcl = v;
1828 return conf;
1829 }
1830 bad:
 1831 	pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1832 return NULL;
1833 }
1834 #endif
1835
1836 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1837 {
1838 /* Find the entry in phys_disk which has the given refnum
 1839 	 * and return its index
1840 */
1841 unsigned int i;
1842 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1843 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1844 return i;
1845 return -1;
1846 }
1847
1848 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1849 {
1850 char buf[20];
1851 struct sha1_ctx ctx;
1852 sha1_init_ctx(&ctx);
1853 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1854 sha1_finish_ctx(&ctx, buf);
1855 memcpy(uuid, buf, 4*4);
1856 }
1857
1858 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1859 {
1860 /* The uuid returned here is used for:
1861 * uuid to put into bitmap file (Create, Grow)
1862 * uuid for backup header when saving critical section (Grow)
1863 * comparing uuids when re-adding a device into an array
1864 * In these cases the uuid required is that of the data-array,
1865 * not the device-set.
1866 * uuid to recognise same set when adding a missing device back
1867 * to an array. This is a uuid for the device-set.
1868 *
1869 * For each of these we can make do with a truncated
1870 * or hashed uuid rather than the original, as long as
1871 * everyone agrees.
1872 * In the case of SVD we assume the BVD is of interest,
 1873 	 * though that might not be the case if a bitmap were made for
1874 * a mirrored SVD - worry about that later.
1875 * So we need to find the VD configuration record for the
1876 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1877 * The first 16 bytes of the sha1 of these is used.
1878 */
1879 struct ddf_super *ddf = st->sb;
1880 struct vcl *vcl = ddf->currentconf;
1881
1882 if (vcl)
1883 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1884 else
1885 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1886 }
1887
1888 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1889
1890 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1891 {
1892 struct ddf_super *ddf = st->sb;
1893 int map_disks = info->array.raid_disks;
1894 __u32 *cptr;
1895
1896 if (ddf->currentconf) {
1897 getinfo_super_ddf_bvd(st, info, map);
1898 return;
1899 }
1900 memset(info, 0, sizeof(*info));
1901
1902 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1903 info->array.level = LEVEL_CONTAINER;
1904 info->array.layout = 0;
1905 info->array.md_minor = -1;
1906 cptr = (__u32 *)(ddf->anchor.guid + 16);
1907 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1908
1909 info->array.utime = 0;
1910 info->array.chunk_size = 0;
1911 info->container_enough = 1;
1912
1913 info->disk.major = 0;
1914 info->disk.minor = 0;
1915 if (ddf->dlist) {
1916 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1917 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1918
1919 info->data_offset = be64_to_cpu(ddf->phys->
1920 entries[info->disk.raid_disk].
1921 config_size);
1922 info->component_size = ddf->dlist->size - info->data_offset;
1923 } else {
1924 info->disk.number = -1;
1925 info->disk.raid_disk = -1;
1926 // info->disk.raid_disk = find refnum in the table and use index;
1927 }
1928 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1929
1930 info->recovery_start = MaxSector;
1931 info->reshape_active = 0;
1932 info->recovery_blocked = 0;
1933 info->name[0] = 0;
1934
1935 info->array.major_version = -1;
1936 info->array.minor_version = -2;
1937 strcpy(info->text_version, "ddf");
1938 info->safe_mode_delay = 0;
1939
1940 uuid_from_super_ddf(st, info->uuid);
1941
1942 if (map) {
1943 int i;
1944 for (i = 0 ; i < map_disks; i++) {
1945 if (i < info->array.raid_disks &&
1946 (be16_to_cpu(ddf->phys->entries[i].state)
1947 & DDF_Online) &&
1948 !(be16_to_cpu(ddf->phys->entries[i].state)
1949 & DDF_Failed))
1950 map[i] = 1;
1951 else
1952 map[i] = 0;
1953 }
1954 }
1955 }
1956
1957 /* size of name must be at least 17 bytes! */
1958 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
1959 {
1960 int j;
1961 memcpy(name, ddf->virt->entries[i].name, 16);
1962 name[16] = 0;
1963 for(j = 0; j < 16; j++)
1964 if (name[j] == ' ')
1965 name[j] = 0;
1966 }
1967
1968 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1969 {
1970 struct ddf_super *ddf = st->sb;
1971 struct vcl *vc = ddf->currentconf;
1972 int cd = ddf->currentdev;
1973 int n_prim;
1974 int j;
1975 struct dl *dl;
1976 int map_disks = info->array.raid_disks;
1977 __u32 *cptr;
1978 struct vd_config *conf;
1979
1980 memset(info, 0, sizeof(*info));
1981 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1982 return;
1983 info->array.md_minor = -1;
1984 cptr = (__u32 *)(vc->conf.guid + 16);
1985 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1986 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1987 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1988 info->custom_array_size = 0;
1989
1990 conf = &vc->conf;
1991 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1992 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1993 int ibvd = cd / n_prim - 1;
1994 cd %= n_prim;
1995 conf = vc->other_bvds[ibvd];
1996 }
1997
1998 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1999 info->data_offset =
2000 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2001 if (vc->block_sizes)
2002 info->component_size = vc->block_sizes[cd];
2003 else
2004 info->component_size = be64_to_cpu(conf->blocks);
2005 }
2006
2007 for (dl = ddf->dlist; dl ; dl = dl->next)
2008 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2009 break;
2010
2011 info->disk.major = 0;
2012 info->disk.minor = 0;
2013 info->disk.state = 0;
2014 if (dl) {
2015 info->disk.major = dl->major;
2016 info->disk.minor = dl->minor;
2017 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2018 * be16_to_cpu(conf->prim_elmnt_count);
2019 info->disk.number = dl->pdnum;
2020 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2021 }
2022
2023 info->container_member = ddf->currentconf->vcnum;
2024
2025 info->recovery_start = MaxSector;
2026 info->resync_start = 0;
2027 info->reshape_active = 0;
2028 info->recovery_blocked = 0;
2029 if (!(ddf->virt->entries[info->container_member].state
2030 & DDF_state_inconsistent) &&
2031 (ddf->virt->entries[info->container_member].init_state
2032 & DDF_initstate_mask)
2033 == DDF_init_full)
2034 info->resync_start = MaxSector;
2035
2036 uuid_from_super_ddf(st, info->uuid);
2037
2038 info->array.major_version = -1;
2039 info->array.minor_version = -2;
2040 sprintf(info->text_version, "/%s/%d",
2041 st->container_devnm,
2042 info->container_member);
2043 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2044
2045 _ddf_array_name(info->name, ddf, info->container_member);
2046
2047 if (map)
2048 for (j = 0; j < map_disks; j++) {
2049 map[j] = 0;
2050 if (j < info->array.raid_disks) {
2051 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2052 if (i >= 0 &&
2053 (be16_to_cpu(ddf->phys->entries[i].state)
2054 & DDF_Online) &&
2055 !(be16_to_cpu(ddf->phys->entries[i].state)
2056 & DDF_Failed))
2057 map[j] = 1;
2058 }
2059 }
2060 }
2061
2062 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2063 char *update,
2064 char *devname, int verbose,
2065 int uuid_set, char *homehost)
2066 {
2067 /* For 'assemble' and 'force' we need to return non-zero if any
2068 * change was made. For others, the return value is ignored.
2069 * Update options are:
2070 * force-one : This device looks a bit old but needs to be included,
2071 * update age info appropriately.
2072 * assemble: clear any 'faulty' flag to allow this device to
2073 * be assembled.
2074 * force-array: Array is degraded but being forced, mark it clean
2075 * if that will be needed to assemble it.
2076 *
2077 * newdev: not used ????
2078 * grow: Array has gained a new device - this is currently for
2079 * linear only
2080 * resync: mark as dirty so a resync will happen.
2081 * uuid: Change the uuid of the array to match what is given
2082 * homehost: update the recorded homehost
2083 * name: update the name - preserving the homehost
2084 * _reshape_progress: record new reshape_progress position.
2085 *
2086 * Following are not relevant for this version:
2087 * sparc2.2 : update from old dodgy metadata
2088 * super-minor: change the preferred_minor number
2089 * summaries: update redundant counters.
2090 */
2091 int rv = 0;
2092 // struct ddf_super *ddf = st->sb;
2093 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2094 // struct virtual_entry *ve = find_ve(ddf);
2095
2096 /* we don't need to handle "force-*" or "assemble" as
2097 * there is no need to 'trick' the kernel. Once the metadata is
2098 * first updated to activate the array, all the implied modifications
2099 * will just happen.
2100 */
2101
2102 if (strcmp(update, "grow") == 0) {
2103 /* FIXME */
2104 } else if (strcmp(update, "resync") == 0) {
2105 // info->resync_checkpoint = 0;
2106 } else if (strcmp(update, "homehost") == 0) {
2107 /* homehost is stored in controller->vendor_data,
2108 * or it is when we are the vendor
2109 */
2110 // if (info->vendor_is_local)
2111 // strcpy(ddf->controller.vendor_data, homehost);
2112 rv = -1;
2113 } else if (strcmp(update, "name") == 0) {
2114 /* name is stored in virtual_entry->name */
2115 // memset(ve->name, ' ', 16);
2116 // strncpy(ve->name, info->name, 16);
2117 rv = -1;
2118 } else if (strcmp(update, "_reshape_progress") == 0) {
2119 /* We don't support reshape yet */
2120 } else if (strcmp(update, "assemble") == 0 ) {
2121 /* Do nothing, just succeed */
2122 rv = 0;
2123 } else
2124 rv = -1;
2125
2126 // update_all_csum(ddf);
2127
2128 return rv;
2129 }
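/*
 * For illustration only: a rough sketch of how the generic --update
 * handling ends up in this handler (the names below mirror the local
 * prototype; the real call sites live elsewhere in mdadm):
 *
 *	st->ss->update_super(st, &info, "resync", devname, 0, 0, NULL);
 *
 * With the current code, "assemble", "resync", "grow" and
 * "_reshape_progress" return 0 (success); everything else returns -1.
 */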
2130
2131 static void make_header_guid(char *guid)
2132 {
2133 be32 stamp;
2134 /* Create a DDF Header of Virtual Disk GUID */
2135
2136 /* 24 bytes of fiction required.
2137 * first 8 are a 'vendor-id' - "Linux-MD"
2138 * next 8 are controller type.. how about 0xDEADBEEF 00000000
2139 * Remaining 8 are a timestamp plus a random number
2140 */
2141 memcpy(guid, T10, sizeof(T10));
2142 stamp = cpu_to_be32(0xdeadbeef);
2143 memcpy(guid+8, &stamp, 4);
2144 stamp = cpu_to_be32(0);
2145 memcpy(guid+12, &stamp, 4);
2146 stamp = cpu_to_be32(time(0) - DECADE);
2147 memcpy(guid+16, &stamp, 4);
2148 stamp._v32 = random32();
2149 memcpy(guid+20, &stamp, 4);
2150 }
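/*
 * For reference, the 24-byte GUID assembled above is laid out roughly
 * as follows (derived from the assignments in make_header_guid()):
 *
 *	bytes  0- 7  "Linux-MD" (the T10 vendor-id)
 *	bytes  8-11  0xDEADBEEF, big-endian (fake controller type)
 *	bytes 12-15  0x00000000
 *	bytes 16-19  creation time as a 1980-based DDF timestamp
 *	bytes 20-23  random32()
 */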
2151
2152 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2153 {
2154 unsigned int i;
2155 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2156 if (all_ff(ddf->virt->entries[i].guid))
2157 return i;
2158 }
2159 return DDF_NOTFOUND;
2160 }
2161
2162 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2163 const char *name)
2164 {
2165 unsigned int i;
2166 if (name == NULL)
2167 return DDF_NOTFOUND;
2168 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2169 if (all_ff(ddf->virt->entries[i].guid))
2170 continue;
2171 if (!strncmp(name, ddf->virt->entries[i].name,
2172 sizeof(ddf->virt->entries[i].name)))
2173 return i;
2174 }
2175 return DDF_NOTFOUND;
2176 }
2177
2178 #ifndef MDASSEMBLE
2179 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2180 const char *guid)
2181 {
2182 unsigned int i;
2183 if (guid == NULL || all_ff(guid))
2184 return DDF_NOTFOUND;
2185 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2186 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2187 return i;
2188 return DDF_NOTFOUND;
2189 }
2190 #endif
2191
2192 static int init_super_ddf_bvd(struct supertype *st,
2193 mdu_array_info_t *info,
2194 unsigned long long size,
2195 char *name, char *homehost,
2196 int *uuid, unsigned long long data_offset);
2197
2198 static int init_super_ddf(struct supertype *st,
2199 mdu_array_info_t *info,
2200 unsigned long long size, char *name, char *homehost,
2201 int *uuid, unsigned long long data_offset)
2202 {
2203 /* This is primarily called by Create when creating a new array.
2204 * We will then get add_to_super called for each component, and then
2205 * write_init_super called to write it out to each device.
2206 * For DDF, Create can create on fresh devices or on a pre-existing
2207 * array.
2208 * To create on a pre-existing array a different method will be called.
2209 * This one is just for fresh drives.
2210 *
2211 * We need to create the entire 'ddf' structure which includes:
2212 * DDF headers - these are easy.
2213 * Controller data - a Sector describing this controller .. not that
2214 * this is a controller exactly.
2215 * Physical Disk Record - one entry per device, so
2216 * leave plenty of space.
2217 * Virtual Disk Records - again, just leave plenty of space.
2218 * This just lists VDs, doesn't give details
2219 * Config records - describes the VDs that use this disk
2220 * DiskData - describes 'this' device.
2221 * BadBlockManagement - empty
2222 * Diag Space - empty
2223 * Vendor Logs - Could we put bitmaps here?
2224 *
2225 */
2226 struct ddf_super *ddf;
2227 char hostname[17];
2228 int hostlen;
2229 int max_phys_disks, max_virt_disks;
2230 unsigned long long sector;
2231 int clen;
2232 int i;
2233 int pdsize, vdsize;
2234 struct phys_disk *pd;
2235 struct virtual_disk *vd;
2236
2237 if (data_offset != INVALID_SECTORS) {
2238 pr_err("data-offset not supported by DDF\n");
2239 return 0;
2240 }
2241
2242 if (st->sb)
2243 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2244 data_offset);
2245
2246 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2247 pr_err("%s could not allocate superblock\n", __func__);
2248 return 0;
2249 }
2250 memset(ddf, 0, sizeof(*ddf));
2251 ddf->dlist = NULL; /* no physical disks yet */
2252 ddf->conflist = NULL; /* No virtual disks yet */
2253 st->sb = ddf;
2254
2255 if (info == NULL) {
2256 /* zeroing superblock */
2257 return 0;
2258 }
2259
2260 /* At least 32MB *must* be reserved for the ddf. So let's just
2261 * start 32MB from the end, and put the primary header there.
2262 * Don't do secondary for now.
2263 * We don't know exactly where that will be yet as it could be
2264 * different on each device. So just set up the lengths.
2265 *
2266 */
2267
2268 ddf->anchor.magic = DDF_HEADER_MAGIC;
2269 make_header_guid(ddf->anchor.guid);
2270
2271 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2272 ddf->anchor.seq = cpu_to_be32(1);
2273 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2274 ddf->anchor.openflag = 0xFF;
2275 ddf->anchor.foreignflag = 0;
2276 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2277 ddf->anchor.pad0 = 0xff;
2278 memset(ddf->anchor.pad1, 0xff, 12);
2279 memset(ddf->anchor.header_ext, 0xff, 32);
2280 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2281 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2282 ddf->anchor.type = DDF_HEADER_ANCHOR;
2283 memset(ddf->anchor.pad2, 0xff, 3);
2284 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2285 /* Put this at bottom of 32M reserved.. */
2286 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2287 max_phys_disks = 1023; /* Should be enough */
2288 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2289 max_virt_disks = 255;
2290 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2291 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2292 ddf->max_part = 64;
2293 ddf->mppe = 256;
2294 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2295 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2296 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2297 memset(ddf->anchor.pad3, 0xff, 54);
2298 /* controller sections is one sector long immediately
2299 * after the ddf header */
2300 sector = 1;
2301 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2302 ddf->anchor.controller_section_length = cpu_to_be32(1);
2303 sector += 1;
2304
2305 /* phys is 8 sectors after that */
2306 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2307 sizeof(struct phys_disk_entry)*max_phys_disks,
2308 512);
2309 switch(pdsize/512) {
2310 case 2: case 8: case 32: case 128: case 512: break;
2311 default: abort();
2312 }
2313 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2314 ddf->anchor.phys_section_length =
2315 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2316 sector += pdsize/512;
2317
2318 /* virt is another 32 sectors */
2319 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2320 sizeof(struct virtual_entry) * max_virt_disks,
2321 512);
2322 switch(vdsize/512) {
2323 case 2: case 8: case 32: case 128: case 512: break;
2324 default: abort();
2325 }
2326 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2327 ddf->anchor.virt_section_length =
2328 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2329 sector += vdsize/512;
2330
2331 clen = ddf->conf_rec_len * (ddf->max_part+1);
2332 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2333 ddf->anchor.config_section_length = cpu_to_be32(clen);
2334 sector += clen;
2335
2336 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2337 ddf->anchor.data_section_length = cpu_to_be32(1);
2338 sector += 1;
2339
2340 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2341 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2342 ddf->anchor.diag_space_length = cpu_to_be32(0);
2343 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2344 ddf->anchor.vendor_length = cpu_to_be32(0);
2345 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2346
2347 memset(ddf->anchor.pad4, 0xff, 256);
2348
2349 memcpy(&ddf->primary, &ddf->anchor, 512);
2350 memcpy(&ddf->secondary, &ddf->anchor, 512);
2351
2352 ddf->primary.openflag = 1; /* I guess.. */
2353 ddf->primary.type = DDF_HEADER_PRIMARY;
2354
2355 ddf->secondary.openflag = 1; /* I guess.. */
2356 ddf->secondary.type = DDF_HEADER_SECONDARY;
2357
2358 ddf->active = &ddf->primary;
2359
2360 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2361
2362 /* 24 more bytes of fiction required.
2363 * first 8 are a 'vendor-id' - "Linux-MD"
2364 * Remaining 16 are serial number.... maybe a hostname would do?
2365 */
2366 memcpy(ddf->controller.guid, T10, sizeof(T10));
2367 gethostname(hostname, sizeof(hostname));
2368 hostname[sizeof(hostname) - 1] = 0;
2369 hostlen = strlen(hostname);
2370 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2371 for (i = strlen(T10) ; i+hostlen < 24; i++)
2372 ddf->controller.guid[i] = ' ';
2373
2374 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2375 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2376 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2377 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2378 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2379 memset(ddf->controller.pad, 0xff, 8);
2380 memset(ddf->controller.vendor_data, 0xff, 448);
2381 if (homehost && strlen(homehost) < 440)
2382 strcpy((char*)ddf->controller.vendor_data, homehost);
2383
2384 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2385 pr_err("%s could not allocate pd\n", __func__);
2386 return 0;
2387 }
2388 ddf->phys = pd;
2389 ddf->pdsize = pdsize;
2390
2391 memset(pd, 0xff, pdsize);
2392 memset(pd, 0, sizeof(*pd));
2393 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2394 pd->used_pdes = cpu_to_be16(0);
2395 pd->max_pdes = cpu_to_be16(max_phys_disks);
2396 memset(pd->pad, 0xff, 52);
2397 for (i = 0; i < max_phys_disks; i++)
2398 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2399
2400 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2401 pr_err("%s could not allocate vd\n", __func__);
2402 return 0;
2403 }
2404 ddf->virt = vd;
2405 ddf->vdsize = vdsize;
2406 memset(vd, 0, vdsize);
2407 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2408 vd->populated_vdes = cpu_to_be16(0);
2409 vd->max_vdes = cpu_to_be16(max_virt_disks);
2410 memset(vd->pad, 0xff, 52);
2411
2412 for (i=0; i<max_virt_disks; i++)
2413 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2414
2415 st->sb = ddf;
2416 ddf_set_updates_pending(ddf);
2417 return 1;
2418 }
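/*
 * A rough sketch of the section layout that init_super_ddf() records in
 * the anchor, as sector offsets relative to the DDF header:
 *
 *	0                        header            (1 sector)
 *	1                        controller data   (1 sector)
 *	2                        phys disk records (pdsize/512 sectors)
 *	2 + pdsize/512           virt disk records (vdsize/512 sectors)
 *	  ...                    config records    (conf_rec_len * (max_part+1))
 *	  ...                    disk data         (1 sector)
 *
 * The primary/secondary header LBAs themselves are still ~0 here; they
 * are filled in per device later, in _write_super_to_disk().
 */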
2419
2420 static int chunk_to_shift(int chunksize)
2421 {
2422 return ffs(chunksize/512)-1;
2423 }
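/*
 * For example: a 64KiB chunk gives 65536/512 == 128, ffs(128) == 8, so
 * chunk_to_shift(65536) returns 7 and the chunk size can be recovered
 * as 512 << 7.
 */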
2424
2425 #ifndef MDASSEMBLE
2426 struct extent {
2427 unsigned long long start, size;
2428 };
2429 static int cmp_extent(const void *av, const void *bv)
2430 {
2431 const struct extent *a = av;
2432 const struct extent *b = bv;
2433 if (a->start < b->start)
2434 return -1;
2435 if (a->start > b->start)
2436 return 1;
2437 return 0;
2438 }
2439
2440 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2441 {
2442 /* find a list of used extents on the given physical device
2443 * (dl) of the given ddf.
2444 * Return a malloced array of 'struct extent'
2445
2446 * FIXME ignore DDF_Legacy devices?
2447
2448 */
2449 struct extent *rv;
2450 int n = 0;
2451 unsigned int i;
2452 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2453
2454 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2455 return NULL;
2456
2457 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2458
2459 for (i = 0; i < ddf->max_part; i++) {
2460 const struct vd_config *bvd;
2461 unsigned int ibvd;
2462 struct vcl *v = dl->vlist[i];
2463 if (v == NULL ||
2464 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2465 &bvd, &ibvd) == DDF_NOTFOUND)
2466 continue;
2467 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2468 rv[n].size = be64_to_cpu(bvd->blocks);
2469 n++;
2470 }
2471 qsort(rv, n, sizeof(*rv), cmp_extent);
2472
2473 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2474 rv[n].size = 0;
2475 return rv;
2476 }
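/*
 * Callers look for gaps between the returned (sorted) extents; the
 * zero-size sentinel added above terminates the scan. A minimal sketch
 * of that pattern, as used below in add_to_super_ddf_bvd() and
 * reserve_space():
 *
 *	struct extent *e = get_extents(ddf, dl);
 *	unsigned long long pos = 0, biggest = 0;
 *	int i = 0;
 *	if (e) {
 *		do {
 *			unsigned long long gap = e[i].start - pos;
 *			if (gap > biggest)
 *				biggest = gap;
 *			pos = e[i].start + e[i].size;
 *			i++;
 *		} while (e[i-1].size);
 *		free(e);
 *	}
 */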
2477 #endif
2478
2479 static int init_super_ddf_bvd(struct supertype *st,
2480 mdu_array_info_t *info,
2481 unsigned long long size,
2482 char *name, char *homehost,
2483 int *uuid, unsigned long long data_offset)
2484 {
2485 /* We are creating a BVD inside a pre-existing container.
2486 * so st->sb is already set.
2487 * We need to create a new vd_config and a new virtual_entry
2488 */
2489 struct ddf_super *ddf = st->sb;
2490 unsigned int venum, i;
2491 struct virtual_entry *ve;
2492 struct vcl *vcl;
2493 struct vd_config *vc;
2494
2495 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2496 pr_err("This ddf already has an array called %s\n", name);
2497 return 0;
2498 }
2499 venum = find_unused_vde(ddf);
2500 if (venum == DDF_NOTFOUND) {
2501 pr_err("Cannot find spare slot for virtual disk\n");
2502 return 0;
2503 }
2504 ve = &ddf->virt->entries[venum];
2505
2506 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2507 * timestamp, random number
2508 */
2509 make_header_guid(ve->guid);
2510 ve->unit = cpu_to_be16(info->md_minor);
2511 ve->pad0 = 0xFFFF;
2512 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2513 DDF_GUID_LEN);
2514 ve->type = cpu_to_be16(0);
2515 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2516 if (info->state & 1) /* clean */
2517 ve->init_state = DDF_init_full;
2518 else
2519 ve->init_state = DDF_init_not;
2520
2521 memset(ve->pad1, 0xff, 14);
2522 memset(ve->name, ' ', 16);
2523 if (name)
2524 strncpy(ve->name, name, 16);
2525 ddf->virt->populated_vdes =
2526 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2527
2528 /* Now create a new vd_config */
2529 if (posix_memalign((void**)&vcl, 512,
2530 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2531 pr_err("%s could not allocate vd_config\n", __func__);
2532 return 0;
2533 }
2534 vcl->vcnum = venum;
2535 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2536 vc = &vcl->conf;
2537
2538 vc->magic = DDF_VD_CONF_MAGIC;
2539 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2540 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2541 vc->seqnum = cpu_to_be32(1);
2542 memset(vc->pad0, 0xff, 24);
2543 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2544 if (layout_md2ddf(info, vc) == -1 ||
2545 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2546 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2547 __func__, info->level, info->layout, info->raid_disks);
2548 free(vcl);
2549 return 0;
2550 }
2551 vc->sec_elmnt_seq = 0;
2552 if (alloc_other_bvds(ddf, vcl) != 0) {
2553 pr_err("%s could not allocate other bvds\n",
2554 __func__);
2555 free(vcl);
2556 return 0;
2557 }
2558 vc->blocks = cpu_to_be64(info->size * 2);
2559 vc->array_blocks = cpu_to_be64(
2560 calc_array_size(info->level, info->raid_disks, info->layout,
2561 info->chunk_size, info->size*2));
2562 memset(vc->pad1, 0xff, 8);
2563 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2564 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2565 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2566 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2567 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2568 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2569 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2570 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2571 memset(vc->cache_pol, 0, 8);
2572 vc->bg_rate = 0x80;
2573 memset(vc->pad2, 0xff, 3);
2574 memset(vc->pad3, 0xff, 52);
2575 memset(vc->pad4, 0xff, 192);
2576 memset(vc->v0, 0xff, 32);
2577 memset(vc->v1, 0xff, 32);
2578 memset(vc->v2, 0xff, 16);
2579 memset(vc->v3, 0xff, 16);
2580 memset(vc->vendor, 0xff, 32);
2581
2582 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2583 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2584
2585 for (i = 1; i < vc->sec_elmnt_count; i++) {
2586 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2587 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2588 }
2589
2590 vcl->next = ddf->conflist;
2591 ddf->conflist = vcl;
2592 ddf->currentconf = vcl;
2593 ddf_set_updates_pending(ddf);
2594 return 1;
2595 }
2596
2597
2598 #ifndef MDASSEMBLE
2599 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2600
2601 static void add_to_super_ddf_bvd(struct supertype *st,
2602 mdu_disk_info_t *dk, int fd, char *devname)
2603 {
2604 /* fd and devname identify a device within the ddf container (st).
2605 * dk identifies a location in the new BVD.
2606 * We need to find suitable free space in that device and update
2607 * the phys_refnum and lba_offset for the newly created vd_config.
2608 * We might also want to update the type in the phys_disk
2609 * section.
2610 *
2611 * Alternately: fd == -1 and we have already chosen which device to
2612 * use and recorded it in dl->raiddisk.
2613 */
2614 struct dl *dl;
2615 struct ddf_super *ddf = st->sb;
2616 struct vd_config *vc;
2617 unsigned int i;
2618 unsigned long long blocks, pos, esize;
2619 struct extent *ex;
2620 unsigned int raid_disk = dk->raid_disk;
2621
2622 if (fd == -1) {
2623 for (dl = ddf->dlist; dl ; dl = dl->next)
2624 if (dl->raiddisk == dk->raid_disk)
2625 break;
2626 } else {
2627 for (dl = ddf->dlist; dl ; dl = dl->next)
2628 if (dl->major == dk->major &&
2629 dl->minor == dk->minor)
2630 break;
2631 }
2632 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2633 return;
2634
2635 vc = &ddf->currentconf->conf;
2636 if (vc->sec_elmnt_count > 1) {
2637 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2638 if (raid_disk >= n)
2639 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2640 raid_disk %= n;
2641 }
2642
2643 ex = get_extents(ddf, dl);
2644 if (!ex)
2645 return;
2646
2647 i = 0; pos = 0;
2648 blocks = be64_to_cpu(vc->blocks);
2649 if (ddf->currentconf->block_sizes)
2650 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2651
2652 do {
2653 esize = ex[i].start - pos;
2654 if (esize >= blocks)
2655 break;
2656 pos = ex[i].start + ex[i].size;
2657 i++;
2658 } while (ex[i-1].size);
2659
2660 free(ex);
2661 if (esize < blocks)
2662 return;
2663
2664 ddf->currentdev = dk->raid_disk;
2665 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2666 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2667
2668 for (i = 0; i < ddf->max_part ; i++)
2669 if (dl->vlist[i] == NULL)
2670 break;
2671 if (i == ddf->max_part)
2672 return;
2673 dl->vlist[i] = ddf->currentconf;
2674
2675 if (fd >= 0)
2676 dl->fd = fd;
2677 if (devname)
2678 dl->devname = devname;
2679
2680 /* Check if we can mark array as optimal yet */
2681 i = ddf->currentconf->vcnum;
2682 ddf->virt->entries[i].state =
2683 (ddf->virt->entries[i].state & ~DDF_state_mask)
2684 | get_svd_state(ddf, ddf->currentconf);
2685 be16_clear(ddf->phys->entries[dl->pdnum].type,
2686 cpu_to_be16(DDF_Global_Spare));
2687 be16_set(ddf->phys->entries[dl->pdnum].type,
2688 cpu_to_be16(DDF_Active_in_VD));
2689 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2690 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2691 ddf->currentconf->vcnum, guid_str(vc->guid),
2692 dk->raid_disk);
2693 ddf_set_updates_pending(ddf);
2694 }
2695
2696 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2697 {
2698 unsigned int i;
2699 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2700 if (all_ff(ddf->phys->entries[i].guid))
2701 return i;
2702 }
2703 return DDF_NOTFOUND;
2704 }
2705
2706 /* add a device to a container, either while creating it or while
2707 * expanding a pre-existing container
2708 */
2709 static int add_to_super_ddf(struct supertype *st,
2710 mdu_disk_info_t *dk, int fd, char *devname,
2711 unsigned long long data_offset)
2712 {
2713 struct ddf_super *ddf = st->sb;
2714 struct dl *dd;
2715 time_t now;
2716 struct tm *tm;
2717 unsigned long long size;
2718 struct phys_disk_entry *pde;
2719 unsigned int n, i;
2720 struct stat stb;
2721 __u32 *tptr;
2722
2723 if (ddf->currentconf) {
2724 add_to_super_ddf_bvd(st, dk, fd, devname);
2725 return 0;
2726 }
2727
2728 /* This is device numbered dk->number. We need to create
2729 * a phys_disk entry and a more detailed disk_data entry.
2730 */
2731 fstat(fd, &stb);
2732 n = find_unused_pde(ddf);
2733 if (n == DDF_NOTFOUND) {
2734 pr_err("%s: No free slot in array, cannot add disk\n",
2735 __func__);
2736 return 1;
2737 }
2738 pde = &ddf->phys->entries[n];
2739 get_dev_size(fd, NULL, &size);
2740 if (size <= 32*1024*1024) {
2741 pr_err("%s: device size must be at least 32MB\n",
2742 __func__);
2743 return 1;
2744 }
2745 size >>= 9;
2746
2747 if (posix_memalign((void**)&dd, 512,
2748 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2749 pr_err("%s could not allocate buffer for new disk, aborting\n",
2750 __func__);
2751 return 1;
2752 }
2753 dd->major = major(stb.st_rdev);
2754 dd->minor = minor(stb.st_rdev);
2755 dd->devname = devname;
2756 dd->fd = fd;
2757 dd->spare = NULL;
2758
2759 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2760 now = time(0);
2761 tm = localtime(&now);
2762 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2763 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2764 tptr = (__u32 *)(dd->disk.guid + 16);
2765 *tptr++ = random32();
2766 *tptr = random32();
2767
2768 do {
2769 /* Cannot be bothered finding a CRC of some irrelevant details*/
2770 dd->disk.refnum._v32 = random32();
2771 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2772 i > 0; i--)
2773 if (be32_eq(ddf->phys->entries[i-1].refnum,
2774 dd->disk.refnum))
2775 break;
2776 } while (i > 0);
2777
2778 dd->disk.forced_ref = 1;
2779 dd->disk.forced_guid = 1;
2780 memset(dd->disk.vendor, ' ', 32);
2781 memcpy(dd->disk.vendor, "Linux", 5);
2782 memset(dd->disk.pad, 0xff, 442);
2783 for (i = 0; i < ddf->max_part ; i++)
2784 dd->vlist[i] = NULL;
2785
2786 dd->pdnum = n;
2787
2788 if (st->update_tail) {
2789 int len = (sizeof(struct phys_disk) +
2790 sizeof(struct phys_disk_entry));
2791 struct phys_disk *pd;
2792
2793 pd = xmalloc(len);
2794 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2795 pd->used_pdes = cpu_to_be16(n);
2796 pde = &pd->entries[0];
2797 dd->mdupdate = pd;
2798 } else
2799 ddf->phys->used_pdes = cpu_to_be16(
2800 1 + be16_to_cpu(ddf->phys->used_pdes));
2801
2802 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2803 pde->refnum = dd->disk.refnum;
2804 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2805 pde->state = cpu_to_be16(DDF_Online);
2806 dd->size = size;
2807 /*
2808 * If there is already a device in dlist, try to reserve the same
2809 * amount of workspace. Otherwise, use 32MB.
2810 * We checked disk size above already.
2811 */
2812 #define __calc_lba(new, old, lba, mb) do { \
2813 unsigned long long dif; \
2814 if ((old) != NULL) \
2815 dif = (old)->size - be64_to_cpu((old)->lba); \
2816 else \
2817 dif = (new)->size; \
2818 if ((new)->size > dif) \
2819 (new)->lba = cpu_to_be64((new)->size - dif); \
2820 else \
2821 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2822 } while (0)
2823 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2824 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2825 if (ddf->dlist == NULL ||
2826 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2827 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
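/*
 * A worked example of __calc_lba(), assuming a fresh container
 * (ddf->dlist == NULL) and a new disk of 1953525168 sectors: "dif"
 * falls back to the new disk's size, so workspace_lba becomes
 * 1953525168 - 32*1024*2 = 1953459632 (32MiB from the end) and
 * primary_lba becomes 1953525168 - 16*1024*2 = 1953492400 (16MiB from
 * the end). With an existing disk in dlist, the same distance from the
 * end of that disk is reused instead.
 */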
2828 pde->config_size = dd->workspace_lba;
2829
2830 sprintf(pde->path, "%17.17s", "Information: nil");
2831 memset(pde->pad, 0xff, 6);
2832
2833 if (st->update_tail) {
2834 dd->next = ddf->add_list;
2835 ddf->add_list = dd;
2836 } else {
2837 dd->next = ddf->dlist;
2838 ddf->dlist = dd;
2839 ddf_set_updates_pending(ddf);
2840 }
2841
2842 return 0;
2843 }
2844
2845 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2846 {
2847 struct ddf_super *ddf = st->sb;
2848 struct dl *dl;
2849
2850 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2851 * disappeared from the container.
2852 * We need to arrange that it disappears from the metadata and
2853 * internal data structures too.
2854 * Most of the work is done by ddf_process_update which edits
2855 * the metadata and closes the file handle and attaches the memory
2856 * where free_updates will free it.
2857 */
2858 for (dl = ddf->dlist; dl ; dl = dl->next)
2859 if (dl->major == dk->major &&
2860 dl->minor == dk->minor)
2861 break;
2862 if (!dl)
2863 return -1;
2864
2865 if (st->update_tail) {
2866 int len = (sizeof(struct phys_disk) +
2867 sizeof(struct phys_disk_entry));
2868 struct phys_disk *pd;
2869
2870 pd = xmalloc(len);
2871 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2872 pd->used_pdes = cpu_to_be16(dl->pdnum);
2873 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2874 append_metadata_update(st, pd, len);
2875 }
2876 return 0;
2877 }
2878 #endif
2879
2880 /*
2881 * This is the write_init_super method for a ddf container. It is
2882 * called when creating a container or adding another device to a
2883 * container.
2884 */
2885 #define NULL_CONF_SZ 4096
2886
2887 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2888 {
2889 unsigned long long sector;
2890 struct ddf_header *header;
2891 int fd, i, n_config, conf_size, buf_size;
2892 int ret = 0;
2893 char *conf;
2894
2895 fd = d->fd;
2896
2897 switch (type) {
2898 case DDF_HEADER_PRIMARY:
2899 header = &ddf->primary;
2900 sector = be64_to_cpu(header->primary_lba);
2901 break;
2902 case DDF_HEADER_SECONDARY:
2903 header = &ddf->secondary;
2904 sector = be64_to_cpu(header->secondary_lba);
2905 break;
2906 default:
2907 return 0;
2908 }
2909 if (sector == ~(__u64)0)
2910 return 0;
2911
2912 header->type = type;
2913 header->openflag = 1;
2914 header->crc = calc_crc(header, 512);
2915
2916 lseek64(fd, sector<<9, 0);
2917 if (write(fd, header, 512) < 0)
2918 goto out;
2919
2920 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2921 if (write(fd, &ddf->controller, 512) < 0)
2922 goto out;
2923
2924 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2925 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2926 goto out;
2927 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2928 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2929 goto out;
2930
2931 /* Now write lots of config records. */
2932 n_config = ddf->max_part;
2933 conf_size = ddf->conf_rec_len * 512;
2934 conf = ddf->conf;
2935 buf_size = conf_size * (n_config + 1);
2936 if (!conf) {
2937 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2938 goto out;
2939 ddf->conf = conf;
2940 }
2941 for (i = 0 ; i <= n_config ; i++) {
2942 struct vcl *c;
2943 struct vd_config *vdc = NULL;
2944 if (i == n_config) {
2945 c = (struct vcl *)d->spare;
2946 if (c)
2947 vdc = &c->conf;
2948 } else {
2949 unsigned int dummy;
2950 c = d->vlist[i];
2951 if (c)
2952 get_pd_index_from_refnum(
2953 c, d->disk.refnum,
2954 ddf->mppe,
2955 (const struct vd_config **)&vdc,
2956 &dummy);
2957 }
2958 if (c) {
2959 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2960 i, be32_to_cpu(d->disk.refnum),
2961 guid_str(vdc->guid),
2962 vdc->sec_elmnt_seq);
2963 vdc->seqnum = header->seq;
2964 vdc->crc = calc_crc(vdc, conf_size);
2965 memcpy(conf + i*conf_size, vdc, conf_size);
2966 } else
2967 memset(conf + i*conf_size, 0xff, conf_size);
2968 }
2969 if (write(fd, conf, buf_size) != buf_size)
2970 goto out;
2971
2972 d->disk.crc = calc_crc(&d->disk, 512);
2973 if (write(fd, &d->disk, 512) < 0)
2974 goto out;
2975
2976 ret = 1;
2977 out:
2978 header->openflag = 0;
2979 header->crc = calc_crc(header, 512);
2980
2981 lseek64(fd, sector<<9, 0);
2982 if (write(fd, header, 512) < 0)
2983 ret = 0;
2984
2985 return ret;
2986 }
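/*
 * To summarise the write order used above (descriptive only): header
 * with openflag set, controller data, physical disk records, virtual
 * disk records, max_part+1 configuration records (the last slot holds
 * the spare assignment record, if any), the per-disk data block, and
 * finally the header again with openflag cleared.
 */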
2987
2988 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2989 {
2990 unsigned long long size;
2991 int fd = d->fd;
2992 if (fd < 0)
2993 return 0;
2994
2995 /* We need to fill in the primary, (secondary) and workspace
2996 * lba's in the headers and set their checksums,
2997 * and also checksum phys, virt....
2998 *
2999 * Then write everything out, finally the anchor is written.
3000 */
3001 get_dev_size(fd, NULL, &size);
3002 size /= 512;
3003 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3004 ddf->anchor.workspace_lba = d->workspace_lba;
3005 else
3006 ddf->anchor.workspace_lba =
3007 cpu_to_be64(size - 32*1024*2);
3008 if (be64_to_cpu(d->primary_lba) != 0ULL)
3009 ddf->anchor.primary_lba = d->primary_lba;
3010 else
3011 ddf->anchor.primary_lba =
3012 cpu_to_be64(size - 16*1024*2);
3013 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3014 ddf->anchor.secondary_lba = d->secondary_lba;
3015 else
3016 ddf->anchor.secondary_lba =
3017 cpu_to_be64(size - 32*1024*2);
3018 ddf->anchor.seq = ddf->active->seq;
3019 memcpy(&ddf->primary, &ddf->anchor, 512);
3020 memcpy(&ddf->secondary, &ddf->anchor, 512);
3021
3022 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3023 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3024 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3025
3026 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3027 return 0;
3028
3029 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3030 return 0;
3031
3032 lseek64(fd, (size-1)*512, SEEK_SET);
3033 if (write(fd, &ddf->anchor, 512) < 0)
3034 return 0;
3035
3036 return 1;
3037 }
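/*
 * With no pre-existing values on the disk, the defaults chosen above
 * place the metadata roughly like this (offsets in 512-byte sectors):
 *
 *	primary header  at size - 16*1024*2  (16MiB from the end)
 *	workspace       at size - 32*1024*2  (32MiB from the end; the
 *	                                      secondary header shares
 *	                                      this default)
 *	anchor          at size - 1          (the very last sector)
 */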
3038
3039 #ifndef MDASSEMBLE
3040 static int __write_init_super_ddf(struct supertype *st)
3041 {
3042 struct ddf_super *ddf = st->sb;
3043 struct dl *d;
3044 int attempts = 0;
3045 int successes = 0;
3046
3047 pr_state(ddf, __func__);
3048
3049 /* try to write updated metadata,
3050 * if we catch a failure move on to the next disk
3051 */
3052 for (d = ddf->dlist; d; d=d->next) {
3053 attempts++;
3054 successes += _write_super_to_disk(ddf, d);
3055 }
3056
3057 return attempts != successes;
3058 }
3059
3060 static int write_init_super_ddf(struct supertype *st)
3061 {
3062 struct ddf_super *ddf = st->sb;
3063 struct vcl *currentconf = ddf->currentconf;
3064
3065 /* we are done with currentconf; reset it so st points at the container */
3066 ddf->currentconf = NULL;
3067
3068 if (st->update_tail) {
3069 /* queue the virtual_disk and vd_config as metadata updates */
3070 struct virtual_disk *vd;
3071 struct vd_config *vc;
3072 int len, tlen;
3073 unsigned int i;
3074
3075 if (!currentconf) {
3076 int len = (sizeof(struct phys_disk) +
3077 sizeof(struct phys_disk_entry));
3078
3079 /* adding a disk to the container. */
3080 if (!ddf->add_list)
3081 return 0;
3082
3083 append_metadata_update(st, ddf->add_list->mdupdate, len);
3084 ddf->add_list->mdupdate = NULL;
3085 return 0;
3086 }
3087
3088 /* Newly created VD */
3089
3090 /* First the virtual disk. We have a slightly fake header */
3091 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3092 vd = xmalloc(len);
3093 *vd = *ddf->virt;
3094 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3095 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3096 append_metadata_update(st, vd, len);
3097
3098 /* Then the vd_config */
3099 len = ddf->conf_rec_len * 512;
3100 tlen = len * currentconf->conf.sec_elmnt_count;
3101 vc = xmalloc(tlen);
3102 memcpy(vc, &currentconf->conf, len);
3103 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3104 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3105 len);
3106 append_metadata_update(st, vc, tlen);
3107
3108 /* FIXME I need to close the fds! */
3109 return 0;
3110 } else {
3111 struct dl *d;
3112 if (!currentconf)
3113 for (d = ddf->dlist; d; d=d->next)
3114 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3115 return __write_init_super_ddf(st);
3116 }
3117 }
3118
3119 #endif
3120
3121 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3122 unsigned long long data_offset)
3123 {
3124 /* We must reserve the last 32Meg */
3125 if (devsize <= 32*1024*2)
3126 return 0;
3127 return devsize - 32*1024*2;
3128 }
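/*
 * E.g. a device of 1953525168 sectors yields 1953525168 - 65536 =
 * 1953459632 usable sectors; anything of 32MiB (65536 sectors) or less
 * yields 0.
 */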
3129
3130 #ifndef MDASSEMBLE
3131
3132 static int reserve_space(struct supertype *st, int raiddisks,
3133 unsigned long long size, int chunk,
3134 unsigned long long *freesize)
3135 {
3136 /* Find 'raiddisks' spare extents at least 'size' big (but
3137 * only caring about multiples of 'chunk') and remember
3138 * them.
3139 * If they cannot be found, fail.
3140 */
3141 struct dl *dl;
3142 struct ddf_super *ddf = st->sb;
3143 int cnt = 0;
3144
3145 for (dl = ddf->dlist; dl ; dl=dl->next) {
3146 dl->raiddisk = -1;
3147 dl->esize = 0;
3148 }
3149 /* Now find largest extent on each device */
3150 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3151 struct extent *e = get_extents(ddf, dl);
3152 unsigned long long pos = 0;
3153 int i = 0;
3154 int found = 0;
3155 unsigned long long minsize = size;
3156
3157 if (size == 0)
3158 minsize = chunk;
3159
3160 if (!e)
3161 continue;
3162 do {
3163 unsigned long long esize;
3164 esize = e[i].start - pos;
3165 if (esize >= minsize) {
3166 found = 1;
3167 minsize = esize;
3168 }
3169 pos = e[i].start + e[i].size;
3170 i++;
3171 } while (e[i-1].size);
3172 if (found) {
3173 cnt++;
3174 dl->esize = minsize;
3175 }
3176 free(e);
3177 }
3178 if (cnt < raiddisks) {
3179 pr_err("not enough devices with space to create array.\n");
3180 return 0; /* Not enough free extents large enough */
3181 }
3182 if (size == 0) {
3183 /* choose the largest size of which there are at least 'raiddisks' */
3184 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3185 struct dl *dl2;
3186 if (dl->esize <= size)
3187 continue;
3188 /* This is bigger than 'size', see if there are enough */
3189 cnt = 0;
3190 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3191 if (dl2->esize >= dl->esize)
3192 cnt++;
3193 if (cnt >= raiddisks)
3194 size = dl->esize;
3195 }
3196 if (chunk) {
3197 size = size / chunk;
3198 size *= chunk;
3199 }
3200 *freesize = size;
3201 if (size < 32) {
3202 pr_err("not enough spare devices to create array.\n");
3203 return 0;
3204 }
3205 }
3206 /* We have a 'size' for which there are enough free extents.
3207 * We simply do a first-fit */
3208 cnt = 0;
3209 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3210 if (dl->esize < size)
3211 continue;
3212
3213 dl->raiddisk = cnt;
3214 cnt++;
3215 }
3216 return 1;
3217 }
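/*
 * A worked example of the size selection above: with raiddisks == 3,
 * size == 0 and largest free extents of 50G, 80G, 80G and 120G on the
 * four members, the scan settles on 80G - the biggest value for which
 * at least three devices still have room - and the first three devices
 * with >= 80G of space are then given raiddisk slots 0, 1 and 2 by
 * first-fit.
 */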
3218
3219 static int
3220 validate_geometry_ddf_container(struct supertype *st,
3221 int level, int layout, int raiddisks,
3222 int chunk, unsigned long long size,
3223 unsigned long long data_offset,
3224 char *dev, unsigned long long *freesize,
3225 int verbose);
3226
3227 static int validate_geometry_ddf_bvd(struct supertype *st,
3228 int level, int layout, int raiddisks,
3229 int *chunk, unsigned long long size,
3230 unsigned long long data_offset,
3231 char *dev, unsigned long long *freesize,
3232 int verbose);
3233
3234 static int validate_geometry_ddf(struct supertype *st,
3235 int level, int layout, int raiddisks,
3236 int *chunk, unsigned long long size,
3237 unsigned long long data_offset,
3238 char *dev, unsigned long long *freesize,
3239 int verbose)
3240 {
3241 int fd;
3242 struct mdinfo *sra;
3243 int cfd;
3244
3245 /* ddf potentially supports lots of things, but it depends on
3246 * what devices are offered (and maybe kernel version?)
3247 * If given unused devices, we will make a container.
3248 * If given devices in a container, we will make a BVD.
3249 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3250 */
3251
3252 if (*chunk == UnSet)
3253 *chunk = DEFAULT_CHUNK;
3254
3255 if (level == -1000000) level = LEVEL_CONTAINER;
3256 if (level == LEVEL_CONTAINER) {
3257 /* Must be a fresh device to add to a container */
3258 return validate_geometry_ddf_container(st, level, layout,
3259 raiddisks, *chunk,
3260 size, data_offset, dev,
3261 freesize,
3262 verbose);
3263 }
3264
3265 if (!dev) {
3266 mdu_array_info_t array = {
3267 .level = level, .layout = layout,
3268 .raid_disks = raiddisks
3269 };
3270 struct vd_config conf;
3271 if (layout_md2ddf(&array, &conf) == -1) {
3272 if (verbose)
3273 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3274 level, layout, raiddisks);
3275 return 0;
3276 }
3277 /* Should check layout? etc */
3278
3279 if (st->sb && freesize) {
3280 /* --create was given a container to create in.
3281 * So we need to check that there are enough
3282 * free spaces and return the amount of space.
3283 * We may as well remember which drives were
3284 * chosen so that add_to_super/getinfo_super
3285 * can return them.
3286 */
3287 return reserve_space(st, raiddisks, size, *chunk, freesize);
3288 }
3289 return 1;
3290 }
3291
3292 if (st->sb) {
3293 /* A container has already been opened, so we are
3294 * creating in there. Maybe a BVD, maybe an SVD.
3295 * Should make a distinction one day.
3296 */
3297 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3298 chunk, size, data_offset, dev,
3299 freesize,
3300 verbose);
3301 }
3302 /* This is the first device for the array.
3303 * If it is a container, we read it in and do automagic allocations,
3304 * no other devices should be given.
3305 * Otherwise it must be a member device of a container, and we
3306 * do manual allocation.
3307 * Later we should check for a BVD and make an SVD.
3308 */
3309 fd = open(dev, O_RDONLY|O_EXCL, 0);
3310 if (fd >= 0) {
3311 sra = sysfs_read(fd, NULL, GET_VERSION);
3312 close(fd);
3313 if (sra && sra->array.major_version == -1 &&
3314 strcmp(sra->text_version, "ddf") == 0) {
3315
3316 /* load super */
3317 /* find space for 'n' devices. */
3318 /* remember the devices */
3319 /* Somehow return the fact that we have enough */
3320 }
3321
3322 if (verbose)
3323 pr_err("ddf: Cannot create this array "
3324 "on device %s - a container is required.\n",
3325 dev);
3326 return 0;
3327 }
3328 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3329 if (verbose)
3330 pr_err("ddf: Cannot open %s: %s\n",
3331 dev, strerror(errno));
3332 return 0;
3333 }
3334 /* Well, it is in use by someone, maybe a 'ddf' container. */
3335 cfd = open_container(fd);
3336 if (cfd < 0) {
3337 close(fd);
3338 if (verbose)
3339 pr_err("ddf: Cannot use %s: %s\n",
3340 dev, strerror(EBUSY));
3341 return 0;
3342 }
3343 sra = sysfs_read(cfd, NULL, GET_VERSION);
3344 close(fd);
3345 if (sra && sra->array.major_version == -1 &&
3346 strcmp(sra->text_version, "ddf") == 0) {
3347 /* This is a member of a ddf container. Load the container
3348 * and try to create a bvd
3349 */
3350 struct ddf_super *ddf;
3351 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3352 st->sb = ddf;
3353 strcpy(st->container_devnm, fd2devnm(cfd));
3354 close(cfd);
3355 return validate_geometry_ddf_bvd(st, level, layout,
3356 raiddisks, chunk, size,
3357 data_offset,
3358 dev, freesize,
3359 verbose);
3360 }
3361 close(cfd);
3362 } else /* device may belong to a different container */
3363 return 0;
3364
3365 return 1;
3366 }
3367
3368 static int
3369 validate_geometry_ddf_container(struct supertype *st,
3370 int level, int layout, int raiddisks,
3371 int chunk, unsigned long long size,
3372 unsigned long long data_offset,
3373 char *dev, unsigned long long *freesize,
3374 int verbose)
3375 {
3376 int fd;
3377 unsigned long long ldsize;
3378
3379 if (level != LEVEL_CONTAINER)
3380 return 0;
3381 if (!dev)
3382 return 1;
3383
3384 fd = open(dev, O_RDONLY|O_EXCL, 0);
3385 if (fd < 0) {
3386 if (verbose)
3387 pr_err("ddf: Cannot open %s: %s\n",
3388 dev, strerror(errno));
3389 return 0;
3390 }
3391 if (!get_dev_size(fd, dev, &ldsize)) {
3392 close(fd);
3393 return 0;
3394 }
3395 close(fd);
3396
3397 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3398 if (*freesize == 0)
3399 return 0;
3400
3401 return 1;
3402 }
3403
3404 static int validate_geometry_ddf_bvd(struct supertype *st,
3405 int level, int layout, int raiddisks,
3406 int *chunk, unsigned long long size,
3407 unsigned long long data_offset,
3408 char *dev, unsigned long long *freesize,
3409 int verbose)
3410 {
3411 struct stat stb;
3412 struct ddf_super *ddf = st->sb;
3413 struct dl *dl;
3414 unsigned long long pos = 0;
3415 unsigned long long maxsize;
3416 struct extent *e;
3417 int i;
3418 /* ddf/bvd supports lots of things, but not containers */
3419 if (level == LEVEL_CONTAINER) {
3420 if (verbose)
3421 pr_err("DDF cannot create a container within a container\n");
3422 return 0;
3423 }
3424 /* We must have the container info already read in. */
3425 if (!ddf)
3426 return 0;
3427
3428 if (!dev) {
3429 /* General test: make sure there is space for
3430 * 'raiddisks' device extents of size 'size'.
3431 */
3432 unsigned long long minsize = size;
3433 int dcnt = 0;
3434 if (minsize == 0)
3435 minsize = 8;
3436 for (dl = ddf->dlist; dl ; dl = dl->next)
3437 {
3438 int found = 0;
3439 pos = 0;
3440
3441 i = 0;
3442 e = get_extents(ddf, dl);
3443 if (!e) continue;
3444 do {
3445 unsigned long long esize;
3446 esize = e[i].start - pos;
3447 if (esize >= minsize)
3448 found = 1;
3449 pos = e[i].start + e[i].size;
3450 i++;
3451 } while (e[i-1].size);
3452 if (found)
3453 dcnt++;
3454 free(e);
3455 }
3456 if (dcnt < raiddisks) {
3457 if (verbose)
3458 pr_err("ddf: Not enough devices with "
3459 "space for this array (%d < %d)\n",
3460 dcnt, raiddisks);
3461 return 0;
3462 }
3463 return 1;
3464 }
3465 /* This device must be a member of the set */
3466 if (stat(dev, &stb) < 0)
3467 return 0;
3468 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3469 return 0;
3470 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3471 if (dl->major == (int)major(stb.st_rdev) &&
3472 dl->minor == (int)minor(stb.st_rdev))
3473 break;
3474 }
3475 if (!dl) {
3476 if (verbose)
3477 pr_err("ddf: %s is not in the "
3478 "same DDF set\n",
3479 dev);
3480 return 0;
3481 }
3482 e = get_extents(ddf, dl);
3483 maxsize = 0;
3484 i = 0;
3485 if (e) do {
3486 unsigned long long esize;
3487 esize = e[i].start - pos;
3488 if (esize >= maxsize)
3489 maxsize = esize;
3490 pos = e[i].start + e[i].size;
3491 i++;
3492 } while (e[i-1].size);
3493 *freesize = maxsize;
3494 // FIXME here I am
3495
3496 return 1;
3497 }
3498
3499 static int load_super_ddf_all(struct supertype *st, int fd,
3500 void **sbp, char *devname)
3501 {
3502 struct mdinfo *sra;
3503 struct ddf_super *super;
3504 struct mdinfo *sd, *best = NULL;
3505 int bestseq = 0;
3506 int seq;
3507 char nm[20];
3508 int dfd;
3509
3510 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3511 if (!sra)
3512 return 1;
3513 if (sra->array.major_version != -1 ||
3514 sra->array.minor_version != -2 ||
3515 strcmp(sra->text_version, "ddf") != 0)
3516 return 1;
3517
3518 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3519 return 1;
3520 memset(super, 0, sizeof(*super));
3521
3522 /* first, try each device, and choose the best ddf */
3523 for (sd = sra->devs ; sd ; sd = sd->next) {
3524 int rv;
3525 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3526 dfd = dev_open(nm, O_RDONLY);
3527 if (dfd < 0)
3528 return 2;
3529 rv = load_ddf_headers(dfd, super, NULL);
3530 close(dfd);
3531 if (rv == 0) {
3532 seq = be32_to_cpu(super->active->seq);
3533 if (super->active->openflag)
3534 seq--;
3535 if (!best || seq > bestseq) {
3536 bestseq = seq;
3537 best = sd;
3538 }
3539 }
3540 }
3541 if (!best)
3542 return 1;
3543 /* OK, load this ddf */
3544 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3545 dfd = dev_open(nm, O_RDONLY);
3546 if (dfd < 0)
3547 return 1;
3548 load_ddf_headers(dfd, super, NULL);
3549 load_ddf_global(dfd, super, NULL);
3550 close(dfd);
3551 /* Now we need the device-local bits */
3552 for (sd = sra->devs ; sd ; sd = sd->next) {
3553 int rv;
3554
3555 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3556 dfd = dev_open(nm, O_RDWR);
3557 if (dfd < 0)
3558 return 2;
3559 rv = load_ddf_headers(dfd, super, NULL);
3560 if (rv == 0)
3561 rv = load_ddf_local(dfd, super, NULL, 1);
3562 if (rv)
3563 return 1;
3564 }
3565
3566 *sbp = super;
3567 if (st->ss == NULL) {
3568 st->ss = &super_ddf;
3569 st->minor_version = 0;
3570 st->max_devs = 512;
3571 }
3572 strcpy(st->container_devnm, fd2devnm(fd));
3573 return 0;
3574 }
3575
3576 static int load_container_ddf(struct supertype *st, int fd,
3577 char *devname)
3578 {
3579 return load_super_ddf_all(st, fd, &st->sb, devname);
3580 }
3581
3582 #endif /* MDASSEMBLE */
3583
3584 static int check_secondary(const struct vcl *vc)
3585 {
3586 const struct vd_config *conf = &vc->conf;
3587 int i;
3588
3589 /* The only DDF secondary RAID level md can support is
3590 * RAID 10, if the stripe sizes and Basic volume sizes
3591 * are all equal.
3592 * Other configurations could in theory be supported by exposing
3593 * the BVDs to user space and using device mapper for the secondary
3594 * mapping. So far we don't support that.
3595 */
3596
3597 __u64 sec_elements[4] = {0, 0, 0, 0};
3598 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3599 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
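/*
 * These two macros implement a small 256-bit "seen" map over
 * sec_elements[]: bit n lives in word n>>6 at bit position n&63, so
 * e.g. BVD sequence number 70 sets bit 6 of sec_elements[1].
 */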
3600
3601 if (vc->other_bvds == NULL) {
3602 pr_err("No BVDs for secondary RAID found\n");
3603 return -1;
3604 }
3605 if (conf->prl != DDF_RAID1) {
3606 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3607 return -1;
3608 }
3609 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3610 pr_err("Secondary RAID level %d is unsupported\n",
3611 conf->srl);
3612 return -1;
3613 }
3614 __set_sec_seen(conf->sec_elmnt_seq);
3615 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3616 const struct vd_config *bvd = vc->other_bvds[i];
3617 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3618 continue;
3619 if (bvd->srl != conf->srl) {
3620 pr_err("Inconsistent secondary RAID level across BVDs\n");
3621 return -1;
3622 }
3623 if (bvd->prl != conf->prl) {
3624 pr_err("Different RAID levels for BVDs are unsupported\n");
3625 return -1;
3626 }
3627 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3628 pr_err("All BVDs must have the same number of primary elements\n");
3629 return -1;
3630 }
3631 if (bvd->chunk_shift != conf->chunk_shift) {
3632 pr_err("Different strip sizes for BVDs are unsupported\n");
3633 return -1;
3634 }
3635 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3636 pr_err("Different BVD sizes are unsupported\n");
3637 return -1;
3638 }
3639 __set_sec_seen(bvd->sec_elmnt_seq);
3640 }
3641 for (i = 0; i < conf->sec_elmnt_count; i++) {
3642 if (!__was_sec_seen(i)) {
3643 pr_err("BVD %d is missing\n", i);
3644 return -1;
3645 }
3646 }
3647 return 0;
3648 }
3649
3650 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3651 be32 refnum, unsigned int nmax,
3652 const struct vd_config **bvd,
3653 unsigned int *idx)
3654 {
3655 unsigned int i, j, n, sec, cnt;
3656
3657 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3658 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3659
3660 for (i = 0, j = 0 ; i < nmax ; i++) {
3661 /* j counts valid entries for this BVD */
3662 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3663 j++;
3664 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3665 *bvd = &vc->conf;
3666 *idx = i;
3667 return sec * cnt + j - 1;
3668 }
3669 }
3670 if (vc->other_bvds == NULL)
3671 goto bad;
3672
3673 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3674 struct vd_config *vd = vc->other_bvds[n-1];
3675 sec = vd->sec_elmnt_seq;
3676 if (sec == DDF_UNUSED_BVD)
3677 continue;
3678 for (i = 0, j = 0 ; i < nmax ; i++) {
3679 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3680 j++;
3681 if (be32_eq(vd->phys_refnum[i], refnum)) {
3682 *bvd = vd;
3683 *idx = i;
3684 return sec * cnt + j - 1;
3685 }
3686 }
3687 }
3688 bad:
3689 *bvd = NULL;
3690 return DDF_NOTFOUND;
3691 }
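/*
 * The value returned above is the disk's position in the exposed md
 * array: sec_elmnt_seq * prim_elmnt_count plus the disk's rank among
 * the valid entries of its BVD. For example, with prim_elmnt_count == 4
 * a refnum found as the 3rd valid entry (j == 3) of the BVD with
 * sec_elmnt_seq == 1 maps to raid disk 1*4 + 3 - 1 == 6.
 */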
3692
3693 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3694 {
3695 /* Given a container loaded by load_super_ddf_all,
3696 * extract information about all the arrays into
3697 * an mdinfo tree.
3698 *
3699 * For each vcl in conflist: create an mdinfo, fill it in,
3700 * then look for matching devices (phys_refnum) in dlist
3701 * and create appropriate device mdinfo.
3702 */
3703 struct ddf_super *ddf = st->sb;
3704 struct mdinfo *rest = NULL;
3705 struct vcl *vc;
3706
3707 for (vc = ddf->conflist ; vc ; vc=vc->next)
3708 {
3709 unsigned int i;
3710 struct mdinfo *this;
3711 char *ep;
3712 __u32 *cptr;
3713 unsigned int pd;
3714
3715 if (subarray &&
3716 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3717 *ep != '\0'))
3718 continue;
3719
3720 if (vc->conf.sec_elmnt_count > 1) {
3721 if (check_secondary(vc) != 0)
3722 continue;
3723 }
3724
3725 this = xcalloc(1, sizeof(*this));
3726 this->next = rest;
3727 rest = this;
3728
3729 if (layout_ddf2md(&vc->conf, &this->array))
3730 continue;
3731 this->array.md_minor = -1;
3732 this->array.major_version = -1;
3733 this->array.minor_version = -2;
3734 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3735 cptr = (__u32 *)(vc->conf.guid + 16);
3736 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3737 this->array.utime = DECADE +
3738 be32_to_cpu(vc->conf.timestamp);
3739 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3740
3741 i = vc->vcnum;
3742 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3743 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3744 DDF_init_full) {
3745 this->array.state = 0;
3746 this->resync_start = 0;
3747 } else {
3748 this->array.state = 1;
3749 this->resync_start = MaxSector;
3750 }
3751 _ddf_array_name(this->name, ddf, i);
3752 memset(this->uuid, 0, sizeof(this->uuid));
3753 this->component_size = be64_to_cpu(vc->conf.blocks);
3754 this->array.size = this->component_size / 2;
3755 this->container_member = i;
3756
3757 ddf->currentconf = vc;
3758 uuid_from_super_ddf(st, this->uuid);
3759 if (!subarray)
3760 ddf->currentconf = NULL;
3761
3762 sprintf(this->text_version, "/%s/%d",
3763 st->container_devnm, this->container_member);
3764
3765 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3766 struct mdinfo *dev;
3767 struct dl *d;
3768 const struct vd_config *bvd;
3769 unsigned int iphys;
3770 int stt;
3771
3772 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3773 == 0xFFFFFFFF)
3774 continue;
3775
3776 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3777 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3778 != DDF_Online)
3779 continue;
3780
3781 i = get_pd_index_from_refnum(
3782 vc, ddf->phys->entries[pd].refnum,
3783 ddf->mppe, &bvd, &iphys);
3784 if (i == DDF_NOTFOUND)
3785 continue;
3786
3787 this->array.working_disks++;
3788
3789 for (d = ddf->dlist; d ; d=d->next)
3790 if (be32_eq(d->disk.refnum,
3791 ddf->phys->entries[pd].refnum))
3792 break;
3793 if (d == NULL)
3794 /* Haven't found that one yet, maybe there are others */
3795 continue;
3796
3797 dev = xcalloc(1, sizeof(*dev));
3798 dev->next = this->devs;
3799 this->devs = dev;
3800
3801 dev->disk.number = be32_to_cpu(d->disk.refnum);
3802 dev->disk.major = d->major;
3803 dev->disk.minor = d->minor;
3804 dev->disk.raid_disk = i;
3805 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3806 dev->recovery_start = MaxSector;
3807
3808 dev->events = be32_to_cpu(ddf->primary.seq);
3809 dev->data_offset =
3810 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3811 dev->component_size = be64_to_cpu(bvd->blocks);
3812 if (d->devname)
3813 strcpy(dev->name, d->devname);
3814 }
3815 }
3816 return rest;
3817 }
3818
3819 static int store_super_ddf(struct supertype *st, int fd)
3820 {
3821 struct ddf_super *ddf = st->sb;
3822 unsigned long long dsize;
3823 void *buf;
3824 int rc;
3825
3826 if (!ddf)
3827 return 1;
3828
3829 if (!get_dev_size(fd, NULL, &dsize))
3830 return 1;
3831
3832 if (ddf->dlist || ddf->conflist) {
3833 struct stat sta;
3834 struct dl *dl;
3835 int ofd, ret;
3836
3837 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3838 			pr_err("%s: file descriptor does not refer to a block device\n",
3839 			       __func__);
3840 return 1;
3841 }
3842 for (dl = ddf->dlist; dl; dl = dl->next)
3843 if (dl->major == (int)major(sta.st_rdev) &&
3844 dl->minor == (int)minor(sta.st_rdev))
3845 break;
3846 if (!dl) {
3847 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3848 (int)major(sta.st_rdev),
3849 (int)minor(sta.st_rdev));
3850 return 1;
3851 }
3852 ofd = dl->fd;
3853 dl->fd = fd;
3854 ret = (_write_super_to_disk(ddf, dl) != 1);
3855 dl->fd = ofd;
3856 return ret;
3857 }
3858
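	/* No disk or conf list: nothing to store, so just zero the
	 * 512-byte block at the end of the device where the anchor would
	 * live, apparently to invalidate any stale metadata there. */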
3859 if (posix_memalign(&buf, 512, 512) != 0)
3860 return 1;
3861 memset(buf, 0, 512);
3862
3863 lseek64(fd, dsize-512, 0);
3864 rc = write(fd, buf, 512);
3865 free(buf);
3866 if (rc < 0)
3867 return 1;
3868 return 0;
3869 }
3870
3871 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3872 {
3873 /*
3874 * return:
3875 * 0 same, or first was empty, and second was copied
3876 * 1 second had wrong number
3877 * 2 wrong uuid
3878 * 3 wrong other info
3879 */
3880 struct ddf_super *first = st->sb;
3881 struct ddf_super *second = tst->sb;
3882 struct dl *dl1, *dl2;
3883 struct vcl *vl1, *vl2;
3884 unsigned int max_vds, max_pds, pd, vd;
3885
3886 if (!first) {
3887 st->sb = tst->sb;
3888 tst->sb = NULL;
3889 return 0;
3890 }
3891
3892 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3893 return 2;
3894
3895 if (!be32_eq(first->active->seq, second->active->seq)) {
3896 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3897 be32_to_cpu(first->active->seq),
3898 be32_to_cpu(second->active->seq));
3899 return 3;
3900 }
3901 if (first->max_part != second->max_part ||
3902 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3903 !be16_eq(first->virt->populated_vdes,
3904 second->virt->populated_vdes)) {
3905 dprintf("%s: PD/VD number mismatch\n", __func__);
3906 return 3;
3907 }
3908
3909 max_pds = be16_to_cpu(first->phys->used_pdes);
3910 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3911 for (pd = 0; pd < max_pds; pd++)
3912 if (be32_eq(first->phys->entries[pd].refnum,
3913 dl2->disk.refnum))
3914 break;
3915 if (pd == max_pds) {
3916 dprintf("%s: no match for disk %08x\n", __func__,
3917 be32_to_cpu(dl2->disk.refnum));
3918 return 3;
3919 }
3920 }
3921
3922 max_vds = be16_to_cpu(first->active->max_vd_entries);
3923 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3924 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3925 continue;
3926 for (vd = 0; vd < max_vds; vd++)
3927 if (!memcmp(first->virt->entries[vd].guid,
3928 vl2->conf.guid, DDF_GUID_LEN))
3929 break;
3930 if (vd == max_vds) {
3931 dprintf("%s: no match for VD config\n", __func__);
3932 return 3;
3933 }
3934 }
3935 /* FIXME should I look at anything else? */
3936
3937 	/*
3938 	 * At this point we are fairly sure that the metadata matches.
3939 	 * But the new disk may contain additional local data.
3940 	 * Add it to the superblock.
3941 	 */
3942 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3943 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3944 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3945 DDF_GUID_LEN))
3946 break;
3947 if (vl1) {
3948 if (vl1->other_bvds != NULL &&
3949 vl1->conf.sec_elmnt_seq !=
3950 vl2->conf.sec_elmnt_seq) {
3951 dprintf("%s: adding BVD %u\n", __func__,
3952 vl2->conf.sec_elmnt_seq);
3953 add_other_bvd(vl1, &vl2->conf,
3954 first->conf_rec_len*512);
3955 }
3956 continue;
3957 }
3958
3959 if (posix_memalign((void **)&vl1, 512,
3960 (first->conf_rec_len*512 +
3961 offsetof(struct vcl, conf))) != 0) {
3962 pr_err("%s could not allocate vcl buf\n",
3963 __func__);
3964 return 3;
3965 }
3966
3967 vl1->next = first->conflist;
3968 vl1->block_sizes = NULL;
3969 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3970 if (alloc_other_bvds(first, vl1) != 0) {
3971 pr_err("%s could not allocate other bvds\n",
3972 __func__);
3973 free(vl1);
3974 return 3;
3975 }
3976 for (vd = 0; vd < max_vds; vd++)
3977 if (!memcmp(first->virt->entries[vd].guid,
3978 vl1->conf.guid, DDF_GUID_LEN))
3979 break;
3980 vl1->vcnum = vd;
3981 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3982 first->conflist = vl1;
3983 }
3984
3985 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3986 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3987 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3988 break;
3989 if (dl1)
3990 continue;
3991
3992 if (posix_memalign((void **)&dl1, 512,
3993 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3994 != 0) {
3995 pr_err("%s could not allocate disk info buffer\n",
3996 __func__);
3997 return 3;
3998 }
3999 memcpy(dl1, dl2, sizeof(*dl1));
4000 dl1->mdupdate = NULL;
4001 dl1->next = first->dlist;
4002 dl1->fd = -1;
4003 for (pd = 0; pd < max_pds; pd++)
4004 if (be32_eq(first->phys->entries[pd].refnum,
4005 dl1->disk.refnum))
4006 break;
4007 dl1->pdnum = pd;
4008 if (dl2->spare) {
4009 if (posix_memalign((void **)&dl1->spare, 512,
4010 first->conf_rec_len*512) != 0) {
4011 pr_err("%s could not allocate spare info buf\n",
4012 __func__);
4013 return 3;
4014 }
4015 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4016 }
4017 for (vd = 0 ; vd < first->max_part ; vd++) {
4018 if (!dl2->vlist[vd]) {
4019 dl1->vlist[vd] = NULL;
4020 continue;
4021 }
4022 			for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4023 				if (!memcmp(vl1->conf.guid,
4024 					    dl2->vlist[vd]->conf.guid,
4025 					    DDF_GUID_LEN))
4026 					break;
4027 			/* vl1 is the matching vcl in 'first', or NULL if none */
4028 			dl1->vlist[vd] = vl1;
4029 }
4030 first->dlist = dl1;
4031 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4032 be32_to_cpu(dl1->disk.refnum));
4033 }
4034
4035 return 0;
4036 }
4037
4038 #ifndef MDASSEMBLE
4039 /*
4040 * A new array 'a' has been started which claims to be instance 'inst'
4041 * within container 'c'.
4042 * We need to confirm that the array matches the metadata in 'c' so
4043 * that we don't corrupt any metadata.
4044 */
4045 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4046 {
4047 struct ddf_super *ddf = c->sb;
4048 int n = atoi(inst);
4049 struct mdinfo *dev;
4050 struct dl *dl;
4051 static const char faulty[] = "faulty";
4052
4053 if (all_ff(ddf->virt->entries[n].guid)) {
4054 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4055 return -ENODEV;
4056 }
4057 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4058 guid_str(ddf->virt->entries[n].guid));
4059 for (dev = a->info.devs; dev; dev = dev->next) {
4060 for (dl = ddf->dlist; dl; dl = dl->next)
4061 if (dl->major == dev->disk.major &&
4062 dl->minor == dev->disk.minor)
4063 break;
4064 if (!dl) {
4065 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4066 __func__, dev->disk.major, dev->disk.minor, n);
4067 return -1;
4068 }
4069 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4070 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4071 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4072 __func__, n, dl->major, dl->minor,
4073 be16_to_cpu(
4074 ddf->phys->entries[dl->pdnum].state));
4075 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4076 sizeof(faulty) - 1)
4077 pr_err("Write to state_fd failed\n");
4078 dev->curr_state = DS_FAULTY;
4079 }
4080 }
4081 a->info.container_member = n;
4082 return 0;
4083 }
4084
4085 /*
4086 * The array 'a' is to be marked clean in the metadata.
4087  * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4088  * clean up to that point (measured in sectors). If that cannot be recorded
4089  * in the metadata, leave it marked as dirty.
4090 *
4091 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4092 * !global! virtual_disk.virtual_entry structure.
4093 */
4094 static int ddf_set_array_state(struct active_array *a, int consistent)
4095 {
4096 struct ddf_super *ddf = a->container->sb;
4097 int inst = a->info.container_member;
4098 int old = ddf->virt->entries[inst].state;
4099 if (consistent == 2) {
4100 /* Should check if a recovery should be started FIXME */
4101 consistent = 1;
4102 if (!is_resync_complete(&a->info))
4103 consistent = 0;
4104 }
4105 if (consistent)
4106 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4107 else
4108 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4109 if (old != ddf->virt->entries[inst].state)
4110 ddf_set_updates_pending(ddf);
4111
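	/* Record how far initialisation has progressed:
	 * fully resynced -> DDF_init_full, never started -> DDF_init_not,
	 * otherwise (partially done) -> DDF_init_quick.
	 */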
4112 old = ddf->virt->entries[inst].init_state;
4113 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4114 if (is_resync_complete(&a->info))
4115 ddf->virt->entries[inst].init_state |= DDF_init_full;
4116 else if (a->info.resync_start == 0)
4117 ddf->virt->entries[inst].init_state |= DDF_init_not;
4118 else
4119 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4120 if (old != ddf->virt->entries[inst].init_state)
4121 ddf_set_updates_pending(ddf);
4122
4123 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4124 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4125 consistent?"clean":"dirty",
4126 a->info.resync_start);
4127 return consistent;
4128 }
4129
4130 static int get_bvd_state(const struct ddf_super *ddf,
4131 const struct vd_config *vc)
4132 {
4133 unsigned int i, n_bvd, working = 0;
4134 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4135 int pd, st, state;
4136 for (i = 0; i < n_prim; i++) {
4137 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4138 continue;
4139 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4140 if (pd < 0)
4141 continue;
4142 st = be16_to_cpu(ddf->phys->entries[pd].state);
4143 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4144 == DDF_Online)
4145 working++;
4146 }
4147
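	/* Map the count of online members to a DDF state, e.g. a RAID5
	 * BVD with n_prim - 1 members online stays "degraded", with fewer
	 * it becomes "failed", and with all n_prim online it is "optimal". */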
4148 state = DDF_state_degraded;
4149 if (working == n_prim)
4150 state = DDF_state_optimal;
4151 else
4152 switch (vc->prl) {
4153 case DDF_RAID0:
4154 case DDF_CONCAT:
4155 case DDF_JBOD:
4156 state = DDF_state_failed;
4157 break;
4158 case DDF_RAID1:
4159 if (working == 0)
4160 state = DDF_state_failed;
4161 else if (working >= 2)
4162 state = DDF_state_part_optimal;
4163 break;
4164 case DDF_RAID4:
4165 case DDF_RAID5:
4166 if (working < n_prim - 1)
4167 state = DDF_state_failed;
4168 break;
4169 case DDF_RAID6:
4170 if (working < n_prim - 2)
4171 state = DDF_state_failed;
4172 else if (working == n_prim - 1)
4173 state = DDF_state_part_optimal;
4174 break;
4175 }
4176 return state;
4177 }
4178
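/*
 * Combine the states of two BVDs into the state of the SVD spanning them.
 * For example, with a mirrored secondary level (DDF_2MIRRORED) one optimal
 * BVD and one failed BVD still yield DDF_state_part_optimal, whereas for a
 * striped secondary level the same combination yields DDF_state_failed.
 */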
4179 static int secondary_state(int state, int other, int seclevel)
4180 {
4181 if (state == DDF_state_optimal && other == DDF_state_optimal)
4182 return DDF_state_optimal;
4183 if (seclevel == DDF_2MIRRORED) {
4184 if (state == DDF_state_optimal || other == DDF_state_optimal)
4185 return DDF_state_part_optimal;
4186 if (state == DDF_state_failed && other == DDF_state_failed)
4187 return DDF_state_failed;
4188 return DDF_state_degraded;
4189 } else {
4190 if (state == DDF_state_failed || other == DDF_state_failed)
4191 return DDF_state_failed;
4192 if (state == DDF_state_degraded || other == DDF_state_degraded)
4193 return DDF_state_degraded;
4194 return DDF_state_part_optimal;
4195 }
4196 }
4197
4198 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4199 {
4200 int state = get_bvd_state(ddf, &vcl->conf);
4201 unsigned int i;
4202 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4203 state = secondary_state(
4204 state,
4205 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4206 vcl->conf.srl);
4207 }
4208 return state;
4209 }
4210
4211 /*
4212 * The state of each disk is stored in the global phys_disk structure
4213 * in phys_disk.entries[n].state.
4214 * This makes various combinations awkward.
4215 * - When a device fails in any array, it must be failed in all arrays
4216 * that include a part of this device.
4217 * - When a component is rebuilding, we cannot include it officially in the
4218 * array unless this is the only array that uses the device.
4219 *
4220 * So: when transitioning:
4221  * Online -> failed: just set the Failed flag; the monitor will propagate it.
4222  * spare -> online: the device might need to be added to the array.
4223  * spare -> failed: just set Failed. Don't worry whether it is in an array or not.
4224 */
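/*
 * Example of the insert path below: when a rebuilding spare reaches
 * DS_INSYNC (and is not DS_FAULTY) but is not yet recorded in the BVD's
 * phys_refnum slot, its refnum and LBA offset are written into the
 * configuration record and the disk is re-typed from Global_Spare to
 * Active_in_VD.
 */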
4225 static void ddf_set_disk(struct active_array *a, int n, int state)
4226 {
4227 struct ddf_super *ddf = a->container->sb;
4228 unsigned int inst = a->info.container_member, n_bvd;
4229 struct vcl *vcl;
4230 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4231 &n_bvd, &vcl);
4232 int pd;
4233 struct mdinfo *mdi;
4234 struct dl *dl;
4235
4236 dprintf("%s: %d to %x\n", __func__, n, state);
4237 if (vc == NULL) {
4238 dprintf("ddf: cannot find instance %d!!\n", inst);
4239 return;
4240 }
4241 /* Find the matching slot in 'info'. */
4242 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4243 if (mdi->disk.raid_disk == n)
4244 break;
4245 if (!mdi) {
4246 pr_err("%s: cannot find raid disk %d\n",
4247 __func__, n);
4248 return;
4249 }
4250
4251 /* and find the 'dl' entry corresponding to that. */
4252 for (dl = ddf->dlist; dl; dl = dl->next)
4253 if (mdi->state_fd >= 0 &&
4254 mdi->disk.major == dl->major &&
4255 mdi->disk.minor == dl->minor)
4256 break;
4257 if (!dl) {
4258 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4259 __func__, n,
4260 mdi->disk.major, mdi->disk.minor);
4261 return;
4262 }
4263
4264 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4265 if (pd < 0 || pd != dl->pdnum) {
4266 /* disk doesn't currently exist or has changed.
4267 * If it is now in_sync, insert it. */
4268 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4269 __func__, dl->pdnum, dl->major, dl->minor,
4270 be32_to_cpu(dl->disk.refnum));
4271 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4272 __func__, inst, n_bvd,
4273 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4274 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4275 pd = dl->pdnum; /* FIXME: is this really correct ? */
4276 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4277 LBA_OFFSET(ddf, vc)[n_bvd] =
4278 cpu_to_be64(mdi->data_offset);
4279 be16_clear(ddf->phys->entries[pd].type,
4280 cpu_to_be16(DDF_Global_Spare));
4281 be16_set(ddf->phys->entries[pd].type,
4282 cpu_to_be16(DDF_Active_in_VD));
4283 ddf_set_updates_pending(ddf);
4284 }
4285 } else {
4286 be16 old = ddf->phys->entries[pd].state;
4287 if (state & DS_FAULTY)
4288 be16_set(ddf->phys->entries[pd].state,
4289 cpu_to_be16(DDF_Failed));
4290 if (state & DS_INSYNC) {
4291 be16_set(ddf->phys->entries[pd].state,
4292 cpu_to_be16(DDF_Online));
4293 be16_clear(ddf->phys->entries[pd].state,
4294 cpu_to_be16(DDF_Rebuilding));
4295 }
4296 if (!be16_eq(old, ddf->phys->entries[pd].state))
4297 ddf_set_updates_pending(ddf);
4298 }
4299
4300 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4301 be32_to_cpu(dl->disk.refnum), state,
4302 be16_to_cpu(ddf->phys->entries[pd].state));
4303
4304 /* Now we need to check the state of the array and update
4305 * virtual_disk.entries[n].state.
4306 * It needs to be one of "optimal", "degraded", "failed".
4307 * I don't understand 'deleted' or 'missing'.
4308 */
4309 state = get_svd_state(ddf, vcl);
4310
4311 if (ddf->virt->entries[inst].state !=
4312 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4313 | state)) {
4314
4315 ddf->virt->entries[inst].state =
4316 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4317 | state;
4318 ddf_set_updates_pending(ddf);
4319 }
4320
4321 }
4322
4323 static void ddf_sync_metadata(struct supertype *st)
4324 {
4325
4326 /*
4327 * Write all data to all devices.
4328 * Later, we might be able to track whether only local changes
4329 * have been made, or whether any global data has been changed,
4330 * but ddf is sufficiently weird that it probably always
4331 * changes global data ....
4332 */
4333 struct ddf_super *ddf = st->sb;
4334 if (!ddf->updates_pending)
4335 return;
4336 ddf->updates_pending = 0;
4337 __write_init_super_ddf(st);
4338 dprintf("ddf: sync_metadata\n");
4339 }
4340
4341 static int del_from_conflist(struct vcl **list, const char *guid)
4342 {
4343 struct vcl **p;
4344 int found = 0;
4345 for (p = list; p && *p; p = &((*p)->next))
4346 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4347 found = 1;
4348 *p = (*p)->next;
4349 }
4350 return found;
4351 }
4352
4353 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4354 {
4355 struct dl *dl;
4356 unsigned int vdnum, i;
4357 vdnum = find_vde_by_guid(ddf, guid);
4358 if (vdnum == DDF_NOTFOUND) {
4359 pr_err("%s: could not find VD %s\n", __func__,
4360 guid_str(guid));
4361 return -1;
4362 }
4363 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4364 pr_err("%s: could not find conf %s\n", __func__,
4365 guid_str(guid));
4366 return -1;
4367 }
4368 for (dl = ddf->dlist; dl; dl = dl->next)
4369 for (i = 0; i < ddf->max_part; i++)
4370 if (dl->vlist[i] != NULL &&
4371 !memcmp(dl->vlist[i]->conf.guid, guid,
4372 DDF_GUID_LEN))
4373 dl->vlist[i] = NULL;
4374 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4375 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4376 return 0;
4377 }
4378
4379 static int kill_subarray_ddf(struct supertype *st)
4380 {
4381 struct ddf_super *ddf = st->sb;
4382 /*
4383 * currentconf is set in container_content_ddf,
4384 * called with subarray arg
4385 */
4386 struct vcl *victim = ddf->currentconf;
4387 	struct vd_config *conf;
4388 	unsigned int vdnum;
4389 	ddf->currentconf = NULL;
4390 if (!victim) {
4391 pr_err("%s: nothing to kill\n", __func__);
4392 return -1;
4393 }
4394 conf = &victim->conf;
4395 vdnum = find_vde_by_guid(ddf, conf->guid);
4396 if (vdnum == DDF_NOTFOUND) {
4397 pr_err("%s: could not find VD %s\n", __func__,
4398 guid_str(conf->guid));
4399 return -1;
4400 }
4401 if (st->update_tail) {
4402 struct virtual_disk *vd;
4403 int len = sizeof(struct virtual_disk)
4404 + sizeof(struct virtual_entry);
4405 vd = xmalloc(len);
4406 if (vd == NULL) {
4407 pr_err("%s: failed to allocate %d bytes\n", __func__,
4408 len);
4409 return -1;
4410 }
4411 memset(vd, 0 , len);
4412 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4413 vd->populated_vdes = cpu_to_be16(0);
4414 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4415 /* we use DDF_state_deleted as marker */
4416 vd->entries[0].state = DDF_state_deleted;
4417 append_metadata_update(st, vd, len);
4418 } else {
4419 _kill_subarray_ddf(ddf, conf->guid);
4420 ddf_set_updates_pending(ddf);
4421 ddf_sync_metadata(st);
4422 }
4423 return 0;
4424 }
4425
4426 static void copy_matching_bvd(struct ddf_super *ddf,
4427 struct vd_config *conf,
4428 const struct metadata_update *update)
4429 {
4430 unsigned int mppe =
4431 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4432 unsigned int len = ddf->conf_rec_len * 512;
4433 char *p;
4434 struct vd_config *vc;
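	/* Each record in the update carries one BVD; find the one whose
	 * sec_elmnt_seq matches 'conf' and copy its phys_refnum table
	 * together with the LBA offsets stored directly after it, hence
	 * mppe * (sizeof(__u32) + sizeof(__u64)) bytes. */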
4435 for (p = update->buf; p < update->buf + update->len; p += len) {
4436 vc = (struct vd_config *) p;
4437 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4438 memcpy(conf->phys_refnum, vc->phys_refnum,
4439 mppe * (sizeof(__u32) + sizeof(__u64)));
4440 return;
4441 }
4442 }
4443 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4444 conf->sec_elmnt_seq, guid_str(conf->guid));
4445 }
4446
4447 static void ddf_process_update(struct supertype *st,
4448 struct metadata_update *update)
4449 {
4450 /* Apply this update to the metadata.
4451 * The first 4 bytes are a DDF_*_MAGIC which guides
4452 * our actions.
4453 	 * Possible updates are:
4454 * DDF_PHYS_RECORDS_MAGIC
4455 * Add a new physical device or remove an old one.
4456 * Changes to this record only happen implicitly.
4457 * used_pdes is the device number.
4458 * DDF_VIRT_RECORDS_MAGIC
4459 * Add a new VD. Possibly also change the 'access' bits.
4460 * populated_vdes is the entry number.
4461 * DDF_VD_CONF_MAGIC
4462 	 *  New or updated VD. The VIRT_RECORD must already
4463 * exist. For an update, phys_refnum and lba_offset
4464 * (at least) are updated, and the VD_CONF must
4465 * be written to precisely those devices listed with
4466 * a phys_refnum.
4467 * DDF_SPARE_ASSIGN_MAGIC
4468 * replacement Spare Assignment Record... but for which device?
4469 *
4470 * So, e.g.:
4471 * - to create a new array, we send a VIRT_RECORD and
4472 * a VD_CONF. Then assemble and start the array.
4473 * - to activate a spare we send a VD_CONF to add the phys_refnum
4474 * and offset. This will also mark the spare as active with
4475 * a spare-assignment record.
4476 */
4477 struct ddf_super *ddf = st->sb;
4478 be32 *magic = (be32 *)update->buf;
4479 struct phys_disk *pd;
4480 struct virtual_disk *vd;
4481 struct vd_config *vc;
4482 struct vcl *vcl;
4483 struct dl *dl;
4484 unsigned int ent;
4485 unsigned int pdnum, pd2, len;
4486
4487 dprintf("Process update %x\n", be32_to_cpu(*magic));
4488
4489 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4490
4491 if (update->len != (sizeof(struct phys_disk) +
4492 sizeof(struct phys_disk_entry)))
4493 return;
4494 pd = (struct phys_disk*)update->buf;
4495
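		/* For this update type, used_pdes in the update buffer
		 * carries the index of the affected entry, not a count --
		 * see the comment at the top of this function. */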
4496 ent = be16_to_cpu(pd->used_pdes);
4497 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4498 return;
4499 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4500 struct dl **dlp;
4501 /* removing this disk. */
4502 be16_set(ddf->phys->entries[ent].state,
4503 cpu_to_be16(DDF_Missing));
4504 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4505 struct dl *dl = *dlp;
4506 if (dl->pdnum == (signed)ent) {
4507 close(dl->fd);
4508 dl->fd = -1;
4509 /* FIXME this doesn't free
4510 * dl->devname */
4511 update->space = dl;
4512 *dlp = dl->next;
4513 break;
4514 }
4515 }
4516 ddf_set_updates_pending(ddf);
4517 return;
4518 }
4519 if (!all_ff(ddf->phys->entries[ent].guid))
4520 return;
4521 ddf->phys->entries[ent] = pd->entries[0];
4522 ddf->phys->used_pdes = cpu_to_be16
4523 (1 + be16_to_cpu(ddf->phys->used_pdes));
4524 ddf_set_updates_pending(ddf);
4525 if (ddf->add_list) {
4526 struct active_array *a;
4527 struct dl *al = ddf->add_list;
4528 ddf->add_list = al->next;
4529
4530 al->next = ddf->dlist;
4531 ddf->dlist = al;
4532
4533 /* As a device has been added, we should check
4534 * for any degraded devices that might make
4535 * use of this spare */
4536 for (a = st->arrays ; a; a=a->next)
4537 a->check_degraded = 1;
4538 }
4539 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4540
4541 if (update->len != (sizeof(struct virtual_disk) +
4542 sizeof(struct virtual_entry)))
4543 return;
4544 vd = (struct virtual_disk*)update->buf;
4545
4546 if (vd->entries[0].state == DDF_state_deleted) {
4547 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4548 return;
4549 } else {
4550
4551 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4552 if (ent != DDF_NOTFOUND) {
4553 dprintf("%s: VD %s exists already in slot %d\n",
4554 __func__, guid_str(vd->entries[0].guid),
4555 ent);
4556 return;
4557 }
4558 ent = find_unused_vde(ddf);
4559 if (ent == DDF_NOTFOUND)
4560 return;
4561 ddf->virt->entries[ent] = vd->entries[0];
4562 ddf->virt->populated_vdes =
4563 cpu_to_be16(
4564 1 + be16_to_cpu(
4565 ddf->virt->populated_vdes));
4566 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4567 __func__, guid_str(vd->entries[0].guid), ent,
4568 ddf->virt->entries[ent].state,
4569 ddf->virt->entries[ent].init_state);
4570 }
4571 ddf_set_updates_pending(ddf);
4572 }
4573
4574 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4575 vc = (struct vd_config*)update->buf;
4576 len = ddf->conf_rec_len * 512;
4577 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4578 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4579 __func__, guid_str(vc->guid), update->len,
4580 vc->sec_elmnt_count);
4581 return;
4582 }
4583 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4584 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4585 break;
4586 dprintf("%s: conf update for %s (%s)\n", __func__,
4587 guid_str(vc->guid), (vcl ? "old" : "new"));
4588 if (vcl) {
4589 /* An update, just copy the phys_refnum and lba_offset
4590 * fields
4591 */
4592 unsigned int i;
4593 unsigned int k;
4594 copy_matching_bvd(ddf, &vcl->conf, update);
4595 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4596 dprintf("BVD %u has %08x at %llu\n", 0,
4597 be32_to_cpu(vcl->conf.phys_refnum[k]),
4598 be64_to_cpu(LBA_OFFSET(ddf,
4599 &vcl->conf)[k]));
4600 for (i = 1; i < vc->sec_elmnt_count; i++) {
4601 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4602 update);
4603 for (k = 0; k < be16_to_cpu(
4604 vc->prim_elmnt_count); k++)
4605 dprintf("BVD %u has %08x at %llu\n", i,
4606 be32_to_cpu
4607 (vcl->other_bvds[i-1]->
4608 phys_refnum[k]),
4609 be64_to_cpu
4610 (LBA_OFFSET
4611 (ddf,
4612 vcl->other_bvds[i-1])[k]));
4613 }
4614 } else {
4615 /* A new VD_CONF */
4616 unsigned int i;
4617 if (!update->space)
4618 return;
4619 vcl = update->space;
4620 update->space = NULL;
4621 vcl->next = ddf->conflist;
4622 memcpy(&vcl->conf, vc, len);
4623 ent = find_vde_by_guid(ddf, vc->guid);
4624 if (ent == DDF_NOTFOUND)
4625 return;
4626 vcl->vcnum = ent;
4627 ddf->conflist = vcl;
4628 for (i = 1; i < vc->sec_elmnt_count; i++)
4629 memcpy(vcl->other_bvds[i-1],
4630 update->buf + len * i, len);
4631 }
4632 /* Set DDF_Transition on all Failed devices - to help
4633 * us detect those that are no longer in use
4634 */
4635 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4636 pdnum++)
4637 if (be16_and(ddf->phys->entries[pdnum].state,
4638 cpu_to_be16(DDF_Failed)))
4639 be16_set(ddf->phys->entries[pdnum].state,
4640 cpu_to_be16(DDF_Transition));
4641 /* Now make sure vlist is correct for each dl. */
4642 for (dl = ddf->dlist; dl; dl = dl->next) {
4643 unsigned int vn = 0;
4644 int in_degraded = 0;
4645 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4646 unsigned int dn, ibvd;
4647 const struct vd_config *conf;
4648 int vstate;
4649 dn = get_pd_index_from_refnum(vcl,
4650 dl->disk.refnum,
4651 ddf->mppe,
4652 &conf, &ibvd);
4653 if (dn == DDF_NOTFOUND)
4654 continue;
4655 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4656 dl->pdnum,
4657 be32_to_cpu(dl->disk.refnum),
4658 guid_str(conf->guid),
4659 conf->sec_elmnt_seq, vn);
4660 /* Clear the Transition flag */
4661 if (be16_and
4662 (ddf->phys->entries[dl->pdnum].state,
4663 cpu_to_be16(DDF_Failed)))
4664 be16_clear(ddf->phys
4665 ->entries[dl->pdnum].state,
4666 cpu_to_be16(DDF_Transition));
4667 dl->vlist[vn++] = vcl;
4668 vstate = ddf->virt->entries[vcl->vcnum].state
4669 & DDF_state_mask;
4670 if (vstate == DDF_state_degraded ||
4671 vstate == DDF_state_part_optimal)
4672 in_degraded = 1;
4673 }
4674 while (vn < ddf->max_part)
4675 dl->vlist[vn++] = NULL;
4676 if (dl->vlist[0]) {
4677 be16_clear(ddf->phys->entries[dl->pdnum].type,
4678 cpu_to_be16(DDF_Global_Spare));
4679 if (!be16_and(ddf->phys
4680 ->entries[dl->pdnum].type,
4681 cpu_to_be16(DDF_Active_in_VD))) {
4682 be16_set(ddf->phys
4683 ->entries[dl->pdnum].type,
4684 cpu_to_be16(DDF_Active_in_VD));
4685 if (in_degraded)
4686 be16_set(ddf->phys
4687 ->entries[dl->pdnum]
4688 .state,
4689 cpu_to_be16
4690 (DDF_Rebuilding));
4691 }
4692 }
4693 if (dl->spare) {
4694 be16_clear(ddf->phys->entries[dl->pdnum].type,
4695 cpu_to_be16(DDF_Global_Spare));
4696 be16_set(ddf->phys->entries[dl->pdnum].type,
4697 cpu_to_be16(DDF_Spare));
4698 }
4699 if (!dl->vlist[0] && !dl->spare) {
4700 be16_set(ddf->phys->entries[dl->pdnum].type,
4701 cpu_to_be16(DDF_Global_Spare));
4702 be16_clear(ddf->phys->entries[dl->pdnum].type,
4703 cpu_to_be16(DDF_Spare));
4704 be16_clear(ddf->phys->entries[dl->pdnum].type,
4705 cpu_to_be16(DDF_Active_in_VD));
4706 }
4707 }
4708
4709 /* Now remove any 'Failed' devices that are not part
4710 * of any VD. They will have the Transition flag set.
4711 * Once done, we need to update all dl->pdnum numbers.
4712 */
4713 pd2 = 0;
4714 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4715 pdnum++) {
4716 if (be16_and(ddf->phys->entries[pdnum].state,
4717 cpu_to_be16(DDF_Failed))
4718 && be16_and(ddf->phys->entries[pdnum].state,
4719 cpu_to_be16(DDF_Transition))) {
4720 /* skip this one unless in dlist*/
4721 for (dl = ddf->dlist; dl; dl = dl->next)
4722 if (dl->pdnum == (int)pdnum)
4723 break;
4724 if (!dl)
4725 continue;
4726 }
4727 if (pdnum == pd2)
4728 pd2++;
4729 else {
4730 ddf->phys->entries[pd2] =
4731 ddf->phys->entries[pdnum];
4732 for (dl = ddf->dlist; dl; dl = dl->next)
4733 if (dl->pdnum == (int)pdnum)
4734 dl->pdnum = pd2;
4735 pd2++;
4736 }
4737 }
4738 ddf->phys->used_pdes = cpu_to_be16(pd2);
4739 while (pd2 < pdnum) {
4740 memset(ddf->phys->entries[pd2].guid, 0xff,
4741 DDF_GUID_LEN);
4742 pd2++;
4743 }
4744
4745 ddf_set_updates_pending(ddf);
4746 }
4747 /* case DDF_SPARE_ASSIGN_MAGIC */
4748 }
4749
4750 static void ddf_prepare_update(struct supertype *st,
4751 struct metadata_update *update)
4752 {
4753 /* This update arrived at managemon.
4754 * We are about to pass it to monitor.
4755 * If a malloc is needed, do it here.
4756 */
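	/*
	 * (The monitor thread is expected not to allocate memory itself,
	 * presumably so that it keeps working under memory pressure, so the
	 * vcl that a new VD_CONF update may need is pre-allocated here and
	 * handed over via update->space.)
	 */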
4757 struct ddf_super *ddf = st->sb;
4758 be32 *magic = (be32 *)update->buf;
4759 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4760 struct vcl *vcl;
4761 struct vd_config *conf = (struct vd_config *) update->buf;
4762 if (posix_memalign(&update->space, 512,
4763 offsetof(struct vcl, conf)
4764 + ddf->conf_rec_len * 512) != 0) {
4765 update->space = NULL;
4766 return;
4767 }
4768 vcl = update->space;
4769 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4770 if (alloc_other_bvds(ddf, vcl) != 0) {
4771 free(update->space);
4772 update->space = NULL;
4773 }
4774 }
4775 }
4776
4777 /*
4778 * Check degraded state of a RAID10.
4779 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4780 */
4781 static int raid10_degraded(struct mdinfo *info)
4782 {
4783 int n_prim, n_bvds;
4784 int i;
4785 struct mdinfo *d;
4786 char *found;
4787 int ret = -1;
4788
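	/* For the RAID10 layouts DDF uses (0x1NN), the low byte NN is the
	 * number of near copies, e.g. layout 0x102 with 4 raid disks gives
	 * n_prim == 2 copies spread over n_bvds == 2 BVDs. */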
4789 n_prim = info->array.layout & ~0x100;
4790 n_bvds = info->array.raid_disks / n_prim;
4791 found = xmalloc(n_bvds);
4792 if (found == NULL)
4793 return ret;
4794 memset(found, 0, n_bvds);
4795 for (d = info->devs; d; d = d->next) {
4796 i = d->disk.raid_disk / n_prim;
4797 if (i >= n_bvds) {
4798 pr_err("%s: BUG: invalid raid disk\n", __func__);
4799 goto out;
4800 }
4801 if (d->state_fd > 0)
4802 found[i]++;
4803 }
4804 ret = 2;
4805 for (i = 0; i < n_bvds; i++)
4806 if (!found[i]) {
4807 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4808 ret = 0;
4809 goto out;
4810 } else if (found[i] < n_prim) {
4811 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4812 n_bvds);
4813 ret = 1;
4814 }
4815 out:
4816 free(found);
4817 return ret;
4818 }
4819
4820 /*
4821 * Check if the array 'a' is degraded but not failed.
4822 * If it is, find as many spares as are available and needed and
4823 * arrange for their inclusion.
4824 * We only choose devices which are not already in the array,
4825  * and prefer those with a spare-assignment to this array;
4826  * otherwise we choose global spares - assuming always that
4827 * there is enough room.
4828 * For each spare that we assign, we return an 'mdinfo' which
4829 * describes the position for the device in the array.
4830 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4831 * the new phys_refnum and lba_offset values.
4832 *
4833 * Only worry about BVDs at the moment.
4834 */
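/*
 * The candidate search below runs in up to two passes over ddf->dlist:
 * first only spares dedicated to this VD (per their spare assignment
 * record) are accepted; if that pass finds nothing suitable, global_ok is
 * set and the list is scanned again so that global spares (and other
 * usable devices) qualify as well.
 */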
4835 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4836 struct metadata_update **updates)
4837 {
4838 int working = 0;
4839 struct mdinfo *d;
4840 struct ddf_super *ddf = a->container->sb;
4841 int global_ok = 0;
4842 struct mdinfo *rv = NULL;
4843 struct mdinfo *di;
4844 struct metadata_update *mu;
4845 struct dl *dl;
4846 int i;
4847 unsigned int j;
4848 struct vcl *vcl;
4849 struct vd_config *vc;
4850 unsigned int n_bvd;
4851
4852 for (d = a->info.devs ; d ; d = d->next) {
4853 if ((d->curr_state & DS_FAULTY) &&
4854 d->state_fd >= 0)
4855 /* wait for Removal to happen */
4856 return NULL;
4857 if (d->state_fd >= 0)
4858 working ++;
4859 }
4860
4861 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4862 a->info.array.raid_disks,
4863 a->info.array.level);
4864 if (working == a->info.array.raid_disks)
4865 return NULL; /* array not degraded */
4866 switch (a->info.array.level) {
4867 case 1:
4868 if (working == 0)
4869 return NULL; /* failed */
4870 break;
4871 case 4:
4872 case 5:
4873 if (working < a->info.array.raid_disks - 1)
4874 return NULL; /* failed */
4875 break;
4876 case 6:
4877 if (working < a->info.array.raid_disks - 2)
4878 return NULL; /* failed */
4879 break;
4880 case 10:
4881 if (raid10_degraded(&a->info) < 1)
4882 return NULL;
4883 break;
4884 default: /* concat or stripe */
4885 return NULL; /* failed */
4886 }
4887
4888 /* For each slot, if it is not working, find a spare */
4889 dl = ddf->dlist;
4890 for (i = 0; i < a->info.array.raid_disks; i++) {
4891 for (d = a->info.devs ; d ; d = d->next)
4892 if (d->disk.raid_disk == i)
4893 break;
4894 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4895 if (d && (d->state_fd >= 0))
4896 continue;
4897
4898 /* OK, this device needs recovery. Find a spare */
4899 again:
4900 for ( ; dl ; dl = dl->next) {
4901 unsigned long long esize;
4902 unsigned long long pos;
4903 struct mdinfo *d2;
4904 int is_global = 0;
4905 int is_dedicated = 0;
4906 struct extent *ex;
4907 unsigned int j;
4908 be16 state = ddf->phys->entries[dl->pdnum].state;
4909 if (be16_and(state,
4910 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4911 !be16_and(state,
4912 cpu_to_be16(DDF_Online)))
4913 continue;
4914
4915 /* If in this array, skip */
4916 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4917 if (d2->state_fd >= 0 &&
4918 d2->disk.major == dl->major &&
4919 d2->disk.minor == dl->minor) {
4920 dprintf("%x:%x (%08x) already in array\n",
4921 dl->major, dl->minor,
4922 be32_to_cpu(dl->disk.refnum));
4923 break;
4924 }
4925 if (d2)
4926 continue;
4927 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4928 cpu_to_be16(DDF_Spare))) {
4929 /* Check spare assign record */
4930 if (dl->spare) {
4931 if (dl->spare->type & DDF_spare_dedicated) {
4932 /* check spare_ents for guid */
4933 for (j = 0 ;
4934 j < be16_to_cpu
4935 (dl->spare
4936 ->populated);
4937 j++) {
4938 if (memcmp(dl->spare->spare_ents[j].guid,
4939 ddf->virt->entries[a->info.container_member].guid,
4940 DDF_GUID_LEN) == 0)
4941 is_dedicated = 1;
4942 }
4943 } else
4944 is_global = 1;
4945 }
4946 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4947 cpu_to_be16(DDF_Global_Spare))) {
4948 is_global = 1;
4949 } else if (!be16_and(ddf->phys
4950 ->entries[dl->pdnum].state,
4951 cpu_to_be16(DDF_Failed))) {
4952 /* we can possibly use some of this */
4953 is_global = 1;
4954 }
4955 if ( ! (is_dedicated ||
4956 (is_global && global_ok))) {
4957 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4958 is_dedicated, is_global);
4959 continue;
4960 }
4961
4962 /* We are allowed to use this device - is there space?
4963 * We need a->info.component_size sectors */
4964 ex = get_extents(ddf, dl);
4965 if (!ex) {
4966 dprintf("cannot get extents\n");
4967 continue;
4968 }
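			/* Scan the gaps between the extents already in use
			 * and take the first one large enough for one
			 * component of this array. */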
4969 j = 0; pos = 0;
4970 esize = 0;
4971
4972 do {
4973 esize = ex[j].start - pos;
4974 if (esize >= a->info.component_size)
4975 break;
4976 pos = ex[j].start + ex[j].size;
4977 j++;
4978 } while (ex[j-1].size);
4979
4980 free(ex);
4981 if (esize < a->info.component_size) {
4982 dprintf("%x:%x has no room: %llu %llu\n",
4983 dl->major, dl->minor,
4984 esize, a->info.component_size);
4985 /* No room */
4986 continue;
4987 }
4988
4989 /* Cool, we have a device with some space at pos */
4990 di = xcalloc(1, sizeof(*di));
4991 di->disk.number = i;
4992 di->disk.raid_disk = i;
4993 di->disk.major = dl->major;
4994 di->disk.minor = dl->minor;
4995 di->disk.state = 0;
4996 di->recovery_start = 0;
4997 di->data_offset = pos;
4998 di->component_size = a->info.component_size;
4999 di->container_member = dl->pdnum;
5000 di->next = rv;
5001 rv = di;
5002 dprintf("%x:%x (%08x) to be %d at %llu\n",
5003 dl->major, dl->minor,
5004 be32_to_cpu(dl->disk.refnum), i, pos);
5005
5006 break;
5007 }
5008 if (!dl && ! global_ok) {
5009 /* not enough dedicated spares, try global */
5010 global_ok = 1;
5011 dl = ddf->dlist;
5012 goto again;
5013 }
5014 }
5015
5016 if (!rv)
5017 /* No spares found */
5018 return rv;
5019 /* Now 'rv' has a list of devices to return.
5020 * Create a metadata_update record to update the
5021 * phys_refnum and lba_offset values
5022 */
5023 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5024 &n_bvd, &vcl);
5025 if (vc == NULL)
5026 return NULL;
5027
5028 mu = xmalloc(sizeof(*mu));
5029 	if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5030 		free(mu);
5031 		return NULL;
5032 	}
5033
5034 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5035 mu->buf = xmalloc(mu->len);
5036 mu->space = NULL;
5037 mu->space_list = NULL;
5038 mu->next = *updates;
5039 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5040 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5041 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5042 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5043
5044 vc = (struct vd_config*)mu->buf;
5045 for (di = rv ; di ; di = di->next) {
5046 unsigned int i_sec, i_prim;
5047 i_sec = di->disk.raid_disk
5048 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5049 i_prim = di->disk.raid_disk
5050 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5051 vc = (struct vd_config *)(mu->buf
5052 + i_sec * ddf->conf_rec_len * 512);
5053 for (dl = ddf->dlist; dl; dl = dl->next)
5054 if (dl->major == di->disk.major
5055 && dl->minor == di->disk.minor)
5056 break;
5057 if (!dl) {
5058 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5059 __func__, di->disk.raid_disk,
5060 di->disk.major, di->disk.minor);
5061 return NULL;
5062 }
5063 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5064 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5065 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5066 be32_to_cpu(vc->phys_refnum[i_prim]),
5067 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5068 }
5069 *updates = mu;
5070 return rv;
5071 }
5072 #endif /* MDASSEMBLE */
5073
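/*
 * Default md layout for each RAID level when creating inside a DDF
 * container, e.g. RAID5 defaults to left-symmetric and RAID10 to 0x102
 * (two near copies).
 */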
5074 static int ddf_level_to_layout(int level)
5075 {
5076 switch(level) {
5077 case 0:
5078 case 1:
5079 return 0;
5080 case 5:
5081 return ALGORITHM_LEFT_SYMMETRIC;
5082 case 6:
5083 return ALGORITHM_ROTATING_N_CONTINUE;
5084 case 10:
5085 return 0x102;
5086 default:
5087 return UnSet;
5088 }
5089 }
5090
5091 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5092 {
5093 if (level && *level == UnSet)
5094 *level = LEVEL_CONTAINER;
5095
5096 if (level && layout && *layout == UnSet)
5097 *layout = ddf_level_to_layout(*level);
5098 }
5099
5100 struct superswitch super_ddf = {
5101 #ifndef MDASSEMBLE
5102 .examine_super = examine_super_ddf,
5103 .brief_examine_super = brief_examine_super_ddf,
5104 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5105 .export_examine_super = export_examine_super_ddf,
5106 .detail_super = detail_super_ddf,
5107 .brief_detail_super = brief_detail_super_ddf,
5108 .validate_geometry = validate_geometry_ddf,
5109 .write_init_super = write_init_super_ddf,
5110 .add_to_super = add_to_super_ddf,
5111 .remove_from_super = remove_from_super_ddf,
5112 .load_container = load_container_ddf,
5113 .copy_metadata = copy_metadata_ddf,
5114 .kill_subarray = kill_subarray_ddf,
5115 #endif
5116 .match_home = match_home_ddf,
5117 .uuid_from_super= uuid_from_super_ddf,
5118 .getinfo_super = getinfo_super_ddf,
5119 .update_super = update_super_ddf,
5120
5121 .avail_size = avail_size_ddf,
5122
5123 .compare_super = compare_super_ddf,
5124
5125 .load_super = load_super_ddf,
5126 .init_super = init_super_ddf,
5127 .store_super = store_super_ddf,
5128 .free_super = free_super_ddf,
5129 .match_metadata_desc = match_metadata_desc_ddf,
5130 .container_content = container_content_ddf,
5131 .default_geometry = default_geometry_ddf,
5132
5133 .external = 1,
5134
5135 #ifndef MDASSEMBLE
5136 /* for mdmon */
5137 .open_new = ddf_open_new,
5138 .set_array_state= ddf_set_array_state,
5139 .set_disk = ddf_set_disk,
5140 .sync_metadata = ddf_sync_metadata,
5141 .process_update = ddf_process_update,
5142 .prepare_update = ddf_prepare_update,
5143 .activate_spare = ddf_activate_spare,
5144 #endif
5145 .name = "ddf",
5146 };