]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
tests: add test that DDF marks missing devices as failed on assembly.
[thirdparty/mdadm.git] / super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF takes from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
/* Fixed-endianness wrapper types.  Wrapping the raw __uNN in a one-member
 * struct makes it a compile error to mix on-disk big-endian values with
 * native-endian integers without an explicit conversion below.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Equality and bit operations can act on the raw big-endian value
 * directly: AND/OR/compare give the same answer regardless of byte order.
 */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)

/* Convert a wrapped big-endian value to a native-endian integer. */
#define be16_to_cpu(be) __be16_to_cpu((be)._v16)
/* Convert a native-endian integer to the wrapped big-endian type. */
static inline be16 cpu_to_be16(__u16 x)
{
	be16 be = { ._v16 = __cpu_to_be16(x) };
	return be;
}

#define be32_to_cpu(be) __be32_to_cpu((be)._v32)
static inline be32 cpu_to_be32(__u32 x)
{
	be32 be = { ._v32 = __cpu_to_be32(x) };
	return be;
}

#define be64_to_cpu(be) __be64_to_cpu((be)._v64)
static inline be64 cpu_to_be64(__u64 x)
{
	be64 be = { ._v64 = __cpu_to_be64(x) };
	return be;
}
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
310 #define DDF_init_quick 0x01 /* initialisation is progress.
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records each config_record_len sectors
321 * They can be vd_config or spare_assign
322 */
323
/* A virtual-disk configuration record (config_section, local scope).
 * For arrays with a secondary RAID level, one such record describes a
 * single "basic VD" (BVD); sec_elmnt_seq identifies which one.
 */
struct vd_config {
	be32 magic;		/* DDF_VD_CONF_MAGIC */
	be32 crc;
	char guid[DDF_GUID_LEN];
	be32 timestamp;
	be32 seqnum;		/* bumped on each update; highest wins */
	__u8 pad0[24];
	be16 prim_elmnt_count;
	__u8 chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8 prl;		/* primary RAID level - DDF_RAID* */
	__u8 rlq;		/* RAID level qualifier - DDF_RAID*_* */
	__u8 sec_elmnt_count;	/* number of BVDs (1 == no secondary level) */
	__u8 sec_elmnt_seq;	/* which BVD this record describes */
	__u8 srl;		/* secondary RAID level - DDF_2* */
	be64 blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64 array_blocks;	/* blocks in array */
	__u8 pad1[8];
	be32 spare_refs[8];
	__u8 cache_pol[8];
	__u8 bg_rate;
	__u8 pad2[3];
	__u8 pad3[52];
	__u8 pad4[192];
	__u8 v0[32];	/* reserved- 0xff */
	__u8 v1[32];	/* reserved- 0xff */
	__u8 v2[16];	/* reserved- 0xff */
	__u8 v3[16];	/* reserved- 0xff */
	__u8 vendor[32];
	be32 phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64 lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
/* The per-disk LBA offsets live immediately after the mppe refnum slots;
 * locate them via the header's max_primary_element_entries (ddf->mppe). */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
359
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
/* A spare-assignment configuration record (config_section, local scope). */
struct spare_assign {
	be32 magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32 crc;
	be32 timestamp;
	__u8 reserved[7];
	__u8 type;		/* DDF_spare_* bitmap below */
	be16 populated;		/* SAEs used */
	be16 max;		/* max SAEs */
	__u8 pad[8];
	struct spare_assign_entry {
		/* NOTE(review): presumably the GUID of the VD this spare is
		 * assigned to - confirm against the DDF specification. */
		char guid[DDF_GUID_LEN];
		be16 secondary_element;
		__u8 pad[6];
	} spare_ents[0];
};
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
/* The data_section contents - local scope.
 * Identifies this particular physical disk: its GUID matches an entry
 * in the global phys_disk table (see load_ddf_local's pdnum search). */
struct disk_data {
	be32 magic;		/* DDF_PHYS_DATA_MAGIC */
	be32 crc;
	char guid[DDF_GUID_LEN];
	be32 refnum;		/* crc of some magic drive data ... */
	__u8 forced_ref;	/* set when above was not result of magic */
	__u8 forced_guid;	/* set if guid was forced rather than magic */
	__u8 vendor[32];
	__u8 pad[442];
};
401
402 /* bbm_section content */
/* bbm_section content - log of remapped bad blocks (local scope). */
struct bad_block_log {
	be32 magic;
	be32 crc;
	be16 entry_count;
	be32 spare_count;
	__u8 pad[10];
	be64 first_spare;
	struct mapped_block {
		be64 defective_start;
		be32 replacement_start;
		be16 remap_count;
		__u8 pad[2];
	} entries[0];
};
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
435 * Note that the ddf_super has space of the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
/* In-core representation of the DDF metadata: global sections plus a
 * list (conflist/dlist) of the local sections of every member disk.
 */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* &primary or &secondary - whichever
					 * load_ddf_headers chose */
	struct phys_disk *phys;
	struct virtual_disk *virt;
	char *conf;			/* raw config-section buffer, reused
					 * across load_ddf_local calls */
	int pdsize, vdsize;		/* byte sizes of ->phys and ->virt */
	unsigned int max_part, mppe, conf_rec_len; /* cached header limits;
					 * conf_rec_len is in sectors */
	int currentdev;
	int updates_pending;
	struct vcl {
		/* The anonymous union pads the in-core bookkeeping to one
		 * 512-byte sector so that 'conf', which is read from and
		 * written to disk directly, stays sector-aligned (these
		 * structs are allocated with posix_memalign(.., 512, ..)).
		 */
		union {
			char space[512];
			struct {
				struct vcl *next;
				unsigned int vcnum; /* index into ->virt */
				/* Config records of the other BVDs of a
				 * secondary-level array; NULL if none. */
				struct vd_config **other_bvds;
				__u64 *block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	struct dl {
		/* Same sector-padding trick as struct vcl, for 'disk'. */
		union {
			char space[512];
			struct {
				struct dl *next;
				int major, minor;
				char *devname;
				int fd;
				unsigned long long size; /* sectors */
				be64 primary_lba; /* sectors */
				be64 secondary_lba; /* sectors */
				be64 workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
488
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
#if DEBUG
static int all_ff(const char *guid);
/* Debug helper: dump state/init_state of every in-use virtual-disk
 * entry (entries whose GUID is all-0xff are unused and skipped). */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
/* Non-debug build: compiles to nothing. */
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
511
/* Mark the metadata dirty and advance the sequence number of the active
 * header, so the next write supersedes what is currently on disk.
 * Use via the ddf_set_updates_pending() macro, which records the caller
 * for the debug trace. */
static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
{
	ddf->updates_pending = 1;
	ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
	pr_state(ddf, func);
}

#define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
520
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525
526 static be32 calc_crc(void *buf, int len)
527 {
528 /* crcs are always at the same place as in the ddf_header */
529 struct ddf_header *ddf = buf;
530 be32 oldcrc = ddf->crc;
531 __u32 newcrc;
532 ddf->crc = cpu_to_be32(0xffffffff);
533
534 newcrc = crc32(0, buf, len);
535 ddf->crc = oldcrc;
536 /* The crc is store (like everything) bigendian, so convert
537 * here for simplicity
538 */
539 return cpu_to_be32(newcrc);
540 }
541
542 #define DDF_INVALID_LEVEL 0xff
543 #define DDF_NO_SECONDARY 0xff
/* Report an md level/layout/disk-count combination that DDF cannot
 * express; always returns -1 so callers can 'return err_bad_md_layout()'. */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return -1;
}
550
/* Translate an md level/layout description into the DDF PRL/RLQ/SRL
 * triple plus element counts, storing the result in *conf.
 * Returns 0 on success, -1 (via err_bad_md_layout) if the combination
 * cannot be represented in DDF.  Inverse of layout_ddf2md(). */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* DDF RAID1 only knows 2-way and 3-way mirrors. */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* md RAID10 near-2/near-3 maps to spanned DDF RAID1:
		 * sec_elmnt_count mirror sets of 2 (or 3) disks each. */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count = cpu_to_be16(2);
			sec_elmnt_count = array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count = cpu_to_be16(3);
			sec_elmnt_count = array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
645
/* Report a DDF PRL/RLQ/disk-count combination md cannot express;
 * always returns -1 so callers can 'return err_bad_ddf_layout()'. */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
652
653 static int layout_ddf2md(const struct vd_config *conf,
654 mdu_array_info_t *array)
655 {
656 int level = LEVEL_UNSUPPORTED;
657 int layout = 0;
658 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
659
660 if (conf->sec_elmnt_count > 1) {
661 /* see also check_secondary() */
662 if (conf->prl != DDF_RAID1 ||
663 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
664 pr_err("Unsupported secondary RAID level %u/%u\n",
665 conf->prl, conf->srl);
666 return -1;
667 }
668 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
669 layout = 0x102;
670 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
671 layout = 0x103;
672 else
673 return err_bad_ddf_layout(conf);
674 raiddisks *= conf->sec_elmnt_count;
675 level = 10;
676 goto good;
677 }
678
679 switch (conf->prl) {
680 case DDF_CONCAT:
681 level = LEVEL_LINEAR;
682 break;
683 case DDF_RAID0:
684 if (conf->rlq != DDF_RAID0_SIMPLE)
685 return err_bad_ddf_layout(conf);
686 level = 0;
687 break;
688 case DDF_RAID1:
689 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
690 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
691 return err_bad_ddf_layout(conf);
692 level = 1;
693 break;
694 case DDF_RAID4:
695 if (conf->rlq != DDF_RAID4_N)
696 return err_bad_ddf_layout(conf);
697 level = 4;
698 break;
699 case DDF_RAID5:
700 switch (conf->rlq) {
701 case DDF_RAID5_N_RESTART:
702 layout = ALGORITHM_LEFT_ASYMMETRIC;
703 break;
704 case DDF_RAID5_0_RESTART:
705 layout = ALGORITHM_RIGHT_ASYMMETRIC;
706 break;
707 case DDF_RAID5_N_CONTINUE:
708 layout = ALGORITHM_LEFT_SYMMETRIC;
709 break;
710 default:
711 return err_bad_ddf_layout(conf);
712 }
713 level = 5;
714 break;
715 case DDF_RAID6:
716 switch (conf->rlq) {
717 case DDF_RAID5_N_RESTART:
718 layout = ALGORITHM_ROTATING_N_RESTART;
719 break;
720 case DDF_RAID6_0_RESTART:
721 layout = ALGORITHM_ROTATING_ZERO_RESTART;
722 break;
723 case DDF_RAID5_N_CONTINUE:
724 layout = ALGORITHM_ROTATING_N_CONTINUE;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 }
729 level = 6;
730 break;
731 default:
732 return err_bad_ddf_layout(conf);
733 };
734
735 good:
736 array->level = level;
737 array->layout = layout;
738 array->raid_disks = raiddisks;
739 return 0;
740 }
741
/* Read one DDF header (primary or secondary) from sector 'lba' of fd
 * and validate it against the already-loaded anchor header.
 * 'size' is the device size in sectors; 'type' is DDF_HEADER_PRIMARY
 * or DDF_HEADER_SECONDARY.  Returns 1 if the header is usable, else 0.
 */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *     everything after header_type must be the same
	 */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* Everything from pad2 to the end of the sector must match the
	 * anchor byte for byte - that covers all section offsets/lengths. */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
784
/* Read a metadata section of 'len_be' sectors at 'offset_be' sectors
 * relative to the active header's primary/secondary LBA.
 * If 'buf' is NULL a 512-aligned buffer is allocated (and freed again
 * on failure); otherwise data is read into the caller's buffer.
 * If 'check', only the section lengths used by phys/virt tables are
 * accepted.  Returns the buffer, or NULL on any failure.
 */
static void *load_section(int fd, struct ddf_super *super, void *buf,
			  be32 offset_be, be32 len_be, int check)
{
	unsigned long long offset = be32_to_cpu(offset_be);
	unsigned long long len = be32_to_cpu(len_be);
	int dofree = (buf == NULL);

	if (check)
		if (len != 2 && len != 8 && len != 32
		    && len != 128 && len != 512)
			return NULL;

	/* Sanity cap: no section is longer than 1024 sectors (512K). */
	if (len > 1024)
		return NULL;
	if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
		buf = NULL;

	if (!buf)
		return NULL;

	/* Section offsets are relative to the header they came from. */
	if (super->active->type == 1)
		offset += be64_to_cpu(super->active->primary_lba);
	else
		offset += be64_to_cpu(super->active->secondary_lba);

	if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	return buf;
}
822
/* Load and validate the anchor header from the last sector of the
 * device, then the primary and secondary headers it points at, and
 * select one of them as super->active.
 * Returns 0 on success, 1 on read/seek failure, 2 if no usable DDF
 * metadata was found.
 */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	/* The anchor always lives in the very last 512-byte block. */
	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	super->active = NULL;
	if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
	} else
		super->active = &super->primary;

	if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		/* Prefer the secondary header when the primary is missing,
		 * older (lower seq) than a cleanly-closed secondary, or
		 * was left open (i.e. mid-update) while the secondary at
		 * the same seq was closed cleanly. */
		if (super->active == NULL
		    || (be32_to_cpu(super->primary.seq)
			< be32_to_cpu(super->secondary.seq) &&
			!super->secondary.openflag)
		    || (be32_to_cpu(super->primary.seq)
			== be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
			)
			super->active = &super->secondary;
	} else if (devname &&
		   be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
		/* An all-ones secondary_lba means "no secondary header",
		 * which is not an error worth reporting. */
		pr_err("Failed to load secondary DDF header on %s\n",
		       devname);
	if (super->active == NULL)
		return 2;
	return 0;
}
891
/* Load the globally-scoped sections referenced by the active header:
 * controller data, physical-disk table and virtual-disk table, and
 * cache the header's size limits in the ddf_super.
 * Returns 0 on success, 2 if any mandatory section could not be read
 * (in which case phys/virt are freed and reset to NULL).
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	/* Cache frequently-used limits; conf_rec_len is in sectors. */
	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
927
928 #define DDF_UNUSED_BVD 0xff
929 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
930 {
931 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
932 unsigned int i, vdsize;
933 void *p;
934 if (n_vds == 0) {
935 vcl->other_bvds = NULL;
936 return 0;
937 }
938 vdsize = ddf->conf_rec_len * 512;
939 if (posix_memalign(&p, 512, n_vds *
940 (vdsize + sizeof(struct vd_config *))) != 0)
941 return -1;
942 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
943 for (i = 0; i < n_vds; i++) {
944 vcl->other_bvds[i] = p + i * vdsize;
945 memset(vcl->other_bvds[i], 0, vdsize);
946 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
947 }
948 return 0;
949 }
950
/* Record the config of another basic VD ('vd', 'len' bytes) in
 * vcl->other_bvds.  If a record for the same sec_elmnt_seq already
 * exists, it is only replaced by a strictly newer one (higher seqnum);
 * otherwise the first unused slot is taken.  Caller must ensure
 * vcl->other_bvds is non-NULL.
 */
static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
			  unsigned int len)
{
	int i;
	for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
		if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
			break;

	if (i < vcl->conf.sec_elmnt_count-1) {
		/* Already have this BVD - keep the newer of the two. */
		if (be32_to_cpu(vd->seqnum) <=
		    be32_to_cpu(vcl->other_bvds[i]->seqnum))
			return;
	} else {
		/* First sighting - find a free slot. */
		for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
			if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
				break;
		if (i == vcl->conf.sec_elmnt_count-1) {
			pr_err("no space for sec level config %u, count is %u\n",
			       vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
			return;
		}
	}
	memcpy(vcl->other_bvds[i], vd, len);
}
975
976 static int load_ddf_local(int fd, struct ddf_super *super,
977 char *devname, int keep)
978 {
979 struct dl *dl;
980 struct stat stb;
981 char *conf;
982 unsigned int i;
983 unsigned int confsec;
984 int vnum;
985 unsigned int max_virt_disks = be16_to_cpu
986 (super->active->max_vd_entries);
987 unsigned long long dsize;
988
989 /* First the local disk info */
990 if (posix_memalign((void**)&dl, 512,
991 sizeof(*dl) +
992 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
993 pr_err("%s could not allocate disk info buffer\n",
994 __func__);
995 return 1;
996 }
997
998 load_section(fd, super, &dl->disk,
999 super->active->data_section_offset,
1000 super->active->data_section_length,
1001 0);
1002 dl->devname = devname ? xstrdup(devname) : NULL;
1003
1004 fstat(fd, &stb);
1005 dl->major = major(stb.st_rdev);
1006 dl->minor = minor(stb.st_rdev);
1007 dl->next = super->dlist;
1008 dl->fd = keep ? fd : -1;
1009
1010 dl->size = 0;
1011 if (get_dev_size(fd, devname, &dsize))
1012 dl->size = dsize >> 9;
1013 /* If the disks have different sizes, the LBAs will differ
1014 * between phys disks.
1015 * At this point here, the values in super->active must be valid
1016 * for this phys disk. */
1017 dl->primary_lba = super->active->primary_lba;
1018 dl->secondary_lba = super->active->secondary_lba;
1019 dl->workspace_lba = super->active->workspace_lba;
1020 dl->spare = NULL;
1021 for (i = 0 ; i < super->max_part ; i++)
1022 dl->vlist[i] = NULL;
1023 super->dlist = dl;
1024 dl->pdnum = -1;
1025 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1026 if (memcmp(super->phys->entries[i].guid,
1027 dl->disk.guid, DDF_GUID_LEN) == 0)
1028 dl->pdnum = i;
1029
1030 /* Now the config list. */
1031 /* 'conf' is an array of config entries, some of which are
1032 * probably invalid. Those which are good need to be copied into
1033 * the conflist
1034 */
1035
1036 conf = load_section(fd, super, super->conf,
1037 super->active->config_section_offset,
1038 super->active->config_section_length,
1039 0);
1040 super->conf = conf;
1041 vnum = 0;
1042 for (confsec = 0;
1043 confsec < be32_to_cpu(super->active->config_section_length);
1044 confsec += super->conf_rec_len) {
1045 struct vd_config *vd =
1046 (struct vd_config *)((char*)conf + confsec*512);
1047 struct vcl *vcl;
1048
1049 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1050 if (dl->spare)
1051 continue;
1052 if (posix_memalign((void**)&dl->spare, 512,
1053 super->conf_rec_len*512) != 0) {
1054 pr_err("%s could not allocate spare info buf\n",
1055 __func__);
1056 return 1;
1057 }
1058
1059 memcpy(dl->spare, vd, super->conf_rec_len*512);
1060 continue;
1061 }
1062 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1063 continue;
1064 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1065 if (memcmp(vcl->conf.guid,
1066 vd->guid, DDF_GUID_LEN) == 0)
1067 break;
1068 }
1069
1070 if (vcl) {
1071 dl->vlist[vnum++] = vcl;
1072 if (vcl->other_bvds != NULL &&
1073 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1074 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1075 continue;
1076 }
1077 if (be32_to_cpu(vd->seqnum) <=
1078 be32_to_cpu(vcl->conf.seqnum))
1079 continue;
1080 } else {
1081 if (posix_memalign((void**)&vcl, 512,
1082 (super->conf_rec_len*512 +
1083 offsetof(struct vcl, conf))) != 0) {
1084 pr_err("%s could not allocate vcl buf\n",
1085 __func__);
1086 return 1;
1087 }
1088 vcl->next = super->conflist;
1089 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1090 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1091 if (alloc_other_bvds(super, vcl) != 0) {
1092 pr_err("%s could not allocate other bvds\n",
1093 __func__);
1094 free(vcl);
1095 return 1;
1096 };
1097 super->conflist = vcl;
1098 dl->vlist[vnum++] = vcl;
1099 }
1100 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1101 for (i=0; i < max_virt_disks ; i++)
1102 if (memcmp(super->virt->entries[i].guid,
1103 vcl->conf.guid, DDF_GUID_LEN)==0)
1104 break;
1105 if (i < max_virt_disks)
1106 vcl->vcnum = i;
1107 }
1108
1109 return 0;
1110 }
1111
1112 #ifndef MDASSEMBLE
1113 static int load_super_ddf_all(struct supertype *st, int fd,
1114 void **sbp, char *devname);
1115 #endif
1116
1117 static void free_super_ddf(struct supertype *st);
1118
1119 static int load_super_ddf(struct supertype *st, int fd,
1120 char *devname)
1121 {
1122 unsigned long long dsize;
1123 struct ddf_super *super;
1124 int rv;
1125
1126 if (get_dev_size(fd, devname, &dsize) == 0)
1127 return 1;
1128
1129 if (test_partition(fd))
1130 /* DDF is not allowed on partitions */
1131 return 1;
1132
1133 /* 32M is a lower bound */
1134 if (dsize <= 32*1024*1024) {
1135 if (devname)
1136 pr_err("%s is too small for ddf: "
1137 "size is %llu sectors.\n",
1138 devname, dsize>>9);
1139 return 1;
1140 }
1141 if (dsize & 511) {
1142 if (devname)
1143 pr_err("%s is an odd size for ddf: "
1144 "size is %llu bytes.\n",
1145 devname, dsize);
1146 return 1;
1147 }
1148
1149 free_super_ddf(st);
1150
1151 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1152 pr_err("malloc of %zu failed.\n",
1153 sizeof(*super));
1154 return 1;
1155 }
1156 memset(super, 0, sizeof(*super));
1157
1158 rv = load_ddf_headers(fd, super, devname);
1159 if (rv) {
1160 free(super);
1161 return rv;
1162 }
1163
1164 /* Have valid headers and have chosen the best. Let's read in the rest*/
1165
1166 rv = load_ddf_global(fd, super, devname);
1167
1168 if (rv) {
1169 if (devname)
1170 pr_err("Failed to load all information "
1171 "sections on %s\n", devname);
1172 free(super);
1173 return rv;
1174 }
1175
1176 rv = load_ddf_local(fd, super, devname, 0);
1177
1178 if (rv) {
1179 if (devname)
1180 pr_err("Failed to load all information "
1181 "sections on %s\n", devname);
1182 free(super);
1183 return rv;
1184 }
1185
1186 /* Should possibly check the sections .... */
1187
1188 st->sb = super;
1189 if (st->ss == NULL) {
1190 st->ss = &super_ddf;
1191 st->minor_version = 0;
1192 st->max_devs = 512;
1193 }
1194 return 0;
1195
1196 }
1197
1198 static void free_super_ddf(struct supertype *st)
1199 {
1200 struct ddf_super *ddf = st->sb;
1201 if (ddf == NULL)
1202 return;
1203 free(ddf->phys);
1204 free(ddf->virt);
1205 free(ddf->conf);
1206 while (ddf->conflist) {
1207 struct vcl *v = ddf->conflist;
1208 ddf->conflist = v->next;
1209 if (v->block_sizes)
1210 free(v->block_sizes);
1211 if (v->other_bvds)
1212 /*
1213 v->other_bvds[0] points to beginning of buffer,
1214 see alloc_other_bvds()
1215 */
1216 free(v->other_bvds[0]);
1217 free(v);
1218 }
1219 while (ddf->dlist) {
1220 struct dl *d = ddf->dlist;
1221 ddf->dlist = d->next;
1222 if (d->fd >= 0)
1223 close(d->fd);
1224 if (d->spare)
1225 free(d->spare);
1226 free(d);
1227 }
1228 while (ddf->add_list) {
1229 struct dl *d = ddf->add_list;
1230 ddf->add_list = d->next;
1231 if (d->fd >= 0)
1232 close(d->fd);
1233 if (d->spare)
1234 free(d->spare);
1235 free(d);
1236 }
1237 free(ddf);
1238 st->sb = NULL;
1239 }
1240
1241 static struct supertype *match_metadata_desc_ddf(char *arg)
1242 {
1243 /* 'ddf' only support containers */
1244 struct supertype *st;
1245 if (strcmp(arg, "ddf") != 0 &&
1246 strcmp(arg, "default") != 0
1247 )
1248 return NULL;
1249
1250 st = xcalloc(1, sizeof(*st));
1251 st->ss = &super_ddf;
1252 st->max_devs = 512;
1253 st->minor_version = 0;
1254 st->sb = NULL;
1255 return st;
1256 }
1257
1258 #ifndef MDASSEMBLE
1259
/* Names for the low 3 bits of virtual_entry.state (DDF VD state). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};
1271
/* Names for virtual_entry.init_state & DDF_initstate_mask. */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the access bits in init_state (DDF_access_mask >> 6). */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};
1286
/* Names for the DDF primary RAID level (vd_config.prl). */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Names for the DDF secondary RAID level (vd_config.srl). */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1308 #endif
1309
1310 static int all_ff(const char *guid)
1311 {
1312 int i;
1313 for (i = 0; i < DDF_GUID_LEN; i++)
1314 if (guid[i] != (char)0xff)
1315 return 0;
1316 return 1;
1317 }
1318
/* Render a DDF GUID as a printable string: printable ASCII bytes are
 * kept as-is, others are shown as two hex digits.
 * NOTE: returns a pointer to a static buffer, so the result is
 * overwritten by the next call and the function is not reentrant. */
static const char *guid_str(const char *guid)
{
	static char buf[DDF_GUID_LEN*2+1];
	int i;
	char *p = buf;
	for (i = 0; i < DDF_GUID_LEN; i++) {
		unsigned char c = guid[i];
		if (c >= 32 && c < 127)
			p += sprintf(p, "%c", c);
		else
			p += sprintf(p, "%02x", c);
	}
	*p = '\0';
	return (const char *) buf;
}
1334
1335 #ifndef MDASSEMBLE
/* Pretty-print a DDF GUID to stdout.
 * 'tstamp' non-zero additionally decodes bytes 16-19 as a 1980-based
 * creation timestamp (see DECADE) and prints it. */
static void print_guid(char *guid, int tstamp)
{
	/* A GUIDs are part (or all) ASCII and part binary.
	 * They tend to be space padded.
	 * We print the GUID in HEX, then in parentheses add
	 * any initial ASCII sequence, and a possible
	 * time stamp from bytes 16-19
	 */
	int l = DDF_GUID_LEN;
	int i;

	/* Hex dump, 4 bytes per colon-separated group. */
	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n (");
	/* Strip trailing space padding, then print the leading
	 * printable-ASCII run only. */
	while (l && guid[l-1] == ' ')
		l--;
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1371
/* Print the detail lines for virtual disk number 'n' (identified by
 * 'guid') from every matching config record: member map, chunk size,
 * RAID level, secondary level and sizes. Records with a bad CRC or a
 * different GUID are skipped. */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->used_pdes);
			/* Map this member's refnum to a phys-disk
			 * table index; "--" when not found. */
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift 255 means "no chunking" (e.g. CONCAT). */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* Sizes are stored in 512-byte sectors; print KiB. */
		printf(" Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1420
/* Print every populated entry in the virtual disk table, followed by
 * its per-config details via examine_vd(). */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		/* An all-0xff GUID marks an unused slot. */
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1448
/* Print a one-line summary (refnum, size, device node, type/state)
 * for every used entry in the physical disk table. */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->used_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       be32_to_cpu(pd->refnum));
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* Map this entry to a local device node via the disk
		 * list; print blanks when no local device matches. */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1499
/* Implement --examine for a DDF container: print header fields, then
 * the virtual and physical disk tables. */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* A valid magic in the secondary header means a redundant
	 * copy of the metadata exists. */
	printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
					       DDF_HEADER_MAGIC)
	       ?"yes" : "no");
	examine_vds(sb);
	examine_pds(sb);
}
1517
1518 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1519
1520 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1521 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1522 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
1523
1524 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1525 {
1526 /*
1527 * Figure out the VD number for this supertype.
1528 * Returns DDF_CONTAINER for the container itself,
1529 * and DDF_NOTFOUND on error.
1530 */
1531 struct ddf_super *ddf = st->sb;
1532 struct mdinfo *sra;
1533 char *sub, *end;
1534 unsigned int vcnum;
1535
1536 if (*st->container_devnm == '\0')
1537 return DDF_CONTAINER;
1538
1539 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1540 if (!sra || sra->array.major_version != -1 ||
1541 sra->array.minor_version != -2 ||
1542 !is_subarray(sra->text_version))
1543 return DDF_NOTFOUND;
1544
1545 sub = strchr(sra->text_version + 1, '/');
1546 if (sub != NULL)
1547 vcnum = strtoul(sub + 1, &end, 10);
1548 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1549 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1550 return DDF_NOTFOUND;
1551
1552 return vcnum;
1553 }
1554
1555 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1556 {
1557 /* We just write a generic DDF ARRAY entry
1558 */
1559 struct mdinfo info;
1560 char nbuf[64];
1561 getinfo_super_ddf(st, &info, NULL);
1562 fname_from_uuid(st, &info, nbuf, ':');
1563
1564 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1565 }
1566
1567 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1568 {
1569 /* We just write a generic DDF ARRAY entry
1570 */
1571 struct ddf_super *ddf = st->sb;
1572 struct mdinfo info;
1573 unsigned int i;
1574 char nbuf[64];
1575 getinfo_super_ddf(st, &info, NULL);
1576 fname_from_uuid(st, &info, nbuf, ':');
1577
1578 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1579 struct virtual_entry *ve = &ddf->virt->entries[i];
1580 struct vcl vcl;
1581 char nbuf1[64];
1582 char namebuf[17];
1583 if (all_ff(ve->guid))
1584 continue;
1585 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1586 ddf->currentconf =&vcl;
1587 vcl.vcnum = i;
1588 uuid_from_super_ddf(st, info.uuid);
1589 fname_from_uuid(st, &info, nbuf1, ':');
1590 _ddf_array_name(namebuf, ddf, i);
1591 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1592 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1593 nbuf+5, i, nbuf1+5);
1594 }
1595 }
1596
1597 static void export_examine_super_ddf(struct supertype *st)
1598 {
1599 struct mdinfo info;
1600 char nbuf[64];
1601 getinfo_super_ddf(st, &info, NULL);
1602 fname_from_uuid(st, &info, nbuf, ':');
1603 printf("MD_METADATA=ddf\n");
1604 printf("MD_LEVEL=container\n");
1605 printf("MD_UUID=%s\n", nbuf+5);
1606 printf("MD_DEVICES=%u\n",
1607 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1608 }
1609
/* Copy all DDF metadata from device fd 'from' to device fd 'to'
 * (both assumed the same size): everything from the earlier of the
 * primary/secondary headers through to the anchor at end-of-device.
 * Returns 0 on success, 1 on any validation or I/O failure. */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* Read and validate the anchor header (magic, CRC, a
	 * revision we understand). */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* Copy from the lowest of: anchor, primary, secondary. */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* Stream the metadata across in 4K chunks. */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1671
/* --detail output for DDF: intentionally empty for now. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1681
/* T10 vendor IDs (compared against the first 8 bytes of the
 * controller GUID) whose firmware regenerates VD GUIDs on every
 * boot, making volume GUIDs unusable as stable identifiers. */
static const char *vendors_with_variable_volume_UUID[] = {
	"LSI     ",
};
1685
1686 static int volume_id_is_reliable(const struct ddf_super *ddf)
1687 {
1688 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1689 int i;
1690 for (i = 0; i < n; i++)
1691 if (!memcmp(ddf->controller.guid,
1692 vendors_with_variable_volume_UUID[i], 8))
1693 return 0;
1694 return 1;
1695 }
1696
/* Compute the md UUID for subarray 'vcnum'.  When the vendor's VD
 * GUIDs are stable, hash the VD GUID directly; otherwise build a
 * pseudo-UUID from the (stable) header GUID plus the subarray's name
 * and index. */
static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
				 unsigned int vcnum, int uuid[4])
{
	char buf[DDF_GUID_LEN+18], sha[20], *p;
	struct sha1_ctx ctx;
	if (volume_id_is_reliable(ddf)) {
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
		return;
	}
	/*
	 * Some fake RAID BIOSes (in particular, LSI ones) change the
	 * VD GUID at every boot. These GUIDs are not suitable for
	 * identifying an array. Luckily the header GUID appears to
	 * remain constant.
	 * We construct a pseudo-UUID from the header GUID and those
	 * properties of the subarray that we expect to remain constant.
	 */
	memset(buf, 0, sizeof(buf));
	p = buf;
	memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
	p += DDF_GUID_LEN;
	memcpy(p, ddf->virt->entries[vcnum].name, 16);
	p += 16;
	/* NOTE(review): vcnum is stored in host byte order, so this
	 * pseudo-UUID differs between big- and little-endian hosts -
	 * confirm that is acceptable. */
	*((__u16 *) p) = vcnum;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(buf, sizeof(buf), &ctx);
	sha1_finish_ctx(&ctx, sha);
	memcpy(uuid, sha, 4*4);
}
1726
1727 static void brief_detail_super_ddf(struct supertype *st)
1728 {
1729 struct mdinfo info;
1730 char nbuf[64];
1731 struct ddf_super *ddf = st->sb;
1732 unsigned int vcnum = get_vd_num_of_subarray(st);
1733 if (vcnum == DDF_CONTAINER)
1734 uuid_from_super_ddf(st, info.uuid);
1735 else if (vcnum == DDF_NOTFOUND)
1736 return;
1737 else
1738 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1739 fname_from_uuid(st, &info, nbuf,':');
1740 printf(" UUID=%s", nbuf + 5);
1741 }
1742 #endif
1743
1744 static int match_home_ddf(struct supertype *st, char *homehost)
1745 {
1746 /* It matches 'this' host if the controller is a
1747 * Linux-MD controller with vendor_data matching
1748 * the hostname
1749 */
1750 struct ddf_super *ddf = st->sb;
1751 unsigned int len;
1752
1753 if (!homehost)
1754 return 0;
1755 len = strlen(homehost);
1756
1757 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1758 len < sizeof(ddf->controller.vendor_data) &&
1759 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1760 ddf->controller.vendor_data[len] == 0);
1761 }
1762
1763 #ifndef MDASSEMBLE
/* Find the phys_refnum slot of the n-th valid (non-0xffffffff)
 * physical disk in this BVD.  On success store the slot in *n_bvd
 * and return 1; return 0 when member 'n' does not exist. */
static int find_index_in_bvd(const struct ddf_super *ddf,
			     const struct vd_config *conf, unsigned int n,
			     unsigned int *n_bvd)
{
	/*
	 * Find the index of the n-th valid physical disk in this BVD
	 */
	unsigned int i, j;
	/* i walks the slot table, j counts only the occupied slots. */
	for (i = 0, j = 0; i < ddf->mppe &&
		     j < be16_to_cpu(conf->prim_elmnt_count); i++) {
		if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
			if (n == j) {
				*n_bvd = i;
				return 1;
			}
			j++;
		}
	}
	dprintf("%s: couldn't find BVD member %u (total %u)\n",
		__func__, n, be16_to_cpu(conf->prim_elmnt_count));
	return 0;
}
1786
1787 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1788 unsigned int n,
1789 unsigned int *n_bvd, struct vcl **vcl)
1790 {
1791 struct vcl *v;
1792
1793 for (v = ddf->conflist; v; v = v->next) {
1794 unsigned int nsec, ibvd = 0;
1795 struct vd_config *conf;
1796 if (inst != v->vcnum)
1797 continue;
1798 conf = &v->conf;
1799 if (conf->sec_elmnt_count == 1) {
1800 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1801 *vcl = v;
1802 return conf;
1803 } else
1804 goto bad;
1805 }
1806 if (v->other_bvds == NULL) {
1807 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1808 __func__, conf->sec_elmnt_count);
1809 goto bad;
1810 }
1811 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1812 if (conf->sec_elmnt_seq != nsec) {
1813 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1814 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1815 == nsec)
1816 break;
1817 }
1818 if (ibvd == conf->sec_elmnt_count)
1819 goto bad;
1820 conf = v->other_bvds[ibvd-1];
1821 }
1822 if (!find_index_in_bvd(ddf, conf,
1823 n - nsec*conf->sec_elmnt_count, n_bvd))
1824 goto bad;
1825 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1826 , __func__, n, *n_bvd, ibvd, inst);
1827 *vcl = v;
1828 return conf;
1829 }
1830 bad:
1831 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1832 return NULL;
1833 }
1834 #endif
1835
1836 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1837 {
1838 /* Find the entry in phys_disk which has the given refnum
1839 * and return it's index
1840 */
1841 unsigned int i;
1842 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1843 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1844 return i;
1845 return -1;
1846 }
1847
1848 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1849 {
1850 char buf[20];
1851 struct sha1_ctx ctx;
1852 sha1_init_ctx(&ctx);
1853 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1854 sha1_finish_ctx(&ctx, buf);
1855 memcpy(uuid, buf, 4*4);
1856 }
1857
1858 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1859 {
1860 /* The uuid returned here is used for:
1861 * uuid to put into bitmap file (Create, Grow)
1862 * uuid for backup header when saving critical section (Grow)
1863 * comparing uuids when re-adding a device into an array
1864 * In these cases the uuid required is that of the data-array,
1865 * not the device-set.
1866 * uuid to recognise same set when adding a missing device back
1867 * to an array. This is a uuid for the device-set.
1868 *
1869 * For each of these we can make do with a truncated
1870 * or hashed uuid rather than the original, as long as
1871 * everyone agrees.
1872 * In the case of SVD we assume the BVD is of interest,
1873 * though that might be the case if a bitmap were made for
1874 * a mirrored SVD - worry about that later.
1875 * So we need to find the VD configuration record for the
1876 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1877 * The first 16 bytes of the sha1 of these is used.
1878 */
1879 struct ddf_super *ddf = st->sb;
1880 struct vcl *vcl = ddf->currentconf;
1881
1882 if (vcl)
1883 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1884 else
1885 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1886 }
1887
1888 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1889
1890 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1891 {
1892 struct ddf_super *ddf = st->sb;
1893 int map_disks = info->array.raid_disks;
1894 __u32 *cptr;
1895
1896 if (ddf->currentconf) {
1897 getinfo_super_ddf_bvd(st, info, map);
1898 return;
1899 }
1900 memset(info, 0, sizeof(*info));
1901
1902 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1903 info->array.level = LEVEL_CONTAINER;
1904 info->array.layout = 0;
1905 info->array.md_minor = -1;
1906 cptr = (__u32 *)(ddf->anchor.guid + 16);
1907 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1908
1909 info->array.utime = 0;
1910 info->array.chunk_size = 0;
1911 info->container_enough = 1;
1912
1913 info->disk.major = 0;
1914 info->disk.minor = 0;
1915 if (ddf->dlist) {
1916 struct phys_disk_entry *pde = NULL;
1917 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1918 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1919
1920 info->data_offset = be64_to_cpu(ddf->phys->
1921 entries[info->disk.raid_disk].
1922 config_size);
1923 info->component_size = ddf->dlist->size - info->data_offset;
1924 if (info->disk.raid_disk >= 0)
1925 pde = ddf->phys->entries + info->disk.raid_disk;
1926 if (pde &&
1927 !(be16_to_cpu(pde->state) & DDF_Failed))
1928 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1929 else
1930 info->disk.state = 1 << MD_DISK_FAULTY;
1931 } else {
1932 info->disk.number = -1;
1933 info->disk.raid_disk = -1;
1934 // info->disk.raid_disk = find refnum in the table and use index;
1935 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1936 }
1937
1938 info->recovery_start = MaxSector;
1939 info->reshape_active = 0;
1940 info->recovery_blocked = 0;
1941 info->name[0] = 0;
1942
1943 info->array.major_version = -1;
1944 info->array.minor_version = -2;
1945 strcpy(info->text_version, "ddf");
1946 info->safe_mode_delay = 0;
1947
1948 uuid_from_super_ddf(st, info->uuid);
1949
1950 if (map) {
1951 int i;
1952 for (i = 0 ; i < map_disks; i++) {
1953 if (i < info->array.raid_disks &&
1954 !(be16_to_cpu(ddf->phys->entries[i].state)
1955 & DDF_Failed))
1956 map[i] = 1;
1957 else
1958 map[i] = 0;
1959 }
1960 }
1961 }
1962
1963 /* size of name must be at least 17 bytes! */
1964 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
1965 {
1966 int j;
1967 memcpy(name, ddf->virt->entries[i].name, 16);
1968 name[16] = 0;
1969 for(j = 0; j < 16; j++)
1970 if (name[j] == ' ')
1971 name[j] = 0;
1972 }
1973
/* Fill 'info' with subarray-level information for the currently
 * selected VD (ddf->currentconf) and member device (ddf->currentdev).
 * If 'map' is non-NULL it receives a working/failed bitmap. */
static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
	struct vcl *vc = ddf->currentconf;
	/* Currently selected member device; assumed to be a valid
	 * index for this VD's slot table - TODO confirm callers
	 * guarantee that. */
	int cd = ddf->currentdev;
	int n_prim;
	int j;
	struct dl *dl;
	int map_disks = info->array.raid_disks;
	__u32 *cptr;
	struct vd_config *conf;

	memset(info, 0, sizeof(*info));
	if (layout_ddf2md(&vc->conf, &info->array) == -1)
		return;
	info->array.md_minor = -1;
	/* Bytes 16-19 of the VD GUID hold a 1980-based creation
	 * timestamp. */
	cptr = (__u32 *)(vc->conf.guid + 16);
	info->array.ctime = DECADE + __be32_to_cpu(*cptr);
	info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
	info->array.chunk_size = 512 << vc->conf.chunk_shift;
	info->custom_array_size = 0;

	/* For SVDs, translate 'cd' into (BVD, slot-within-BVD). */
	conf = &vc->conf;
	n_prim = be16_to_cpu(conf->prim_elmnt_count);
	if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
		int ibvd = cd / n_prim - 1;
		cd %= n_prim;
		conf = vc->other_bvds[ibvd];
	}

	if (cd >= 0 && (unsigned)cd < ddf->mppe) {
		info->data_offset =
			be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
		if (vc->block_sizes)
			info->component_size = vc->block_sizes[cd];
		else
			info->component_size = be64_to_cpu(conf->blocks);
	}

	/* Find the local device holding this member, if any. */
	for (dl = ddf->dlist; dl ; dl = dl->next)
		if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
			break;

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.state = 0;
	if (dl) {
		info->disk.major = dl->major;
		info->disk.minor = dl->minor;
		info->disk.raid_disk = cd + conf->sec_elmnt_seq
			* be16_to_cpu(conf->prim_elmnt_count);
		info->disk.number = dl->pdnum;
		info->disk.state = 0;
		if (info->disk.number >= 0 &&
		    (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
		    !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
			info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
	}

	info->container_member = ddf->currentconf->vcnum;

	info->recovery_start = MaxSector;
	info->resync_start = 0;
	info->reshape_active = 0;
	info->recovery_blocked = 0;
	/* A consistent, fully initialised VD needs no resync. */
	if (!(ddf->virt->entries[info->container_member].state
	      & DDF_state_inconsistent) &&
	    (ddf->virt->entries[info->container_member].init_state
	     & DDF_initstate_mask)
	    == DDF_init_full)
		info->resync_start = MaxSector;

	uuid_from_super_ddf(st, info->uuid);

	info->array.major_version = -1;
	info->array.minor_version = -2;
	sprintf(info->text_version, "/%s/%d",
		st->container_devnm,
		info->container_member);
	info->safe_mode_delay = DDF_SAFE_MODE_DELAY;

	_ddf_array_name(info->name, ddf, info->container_member);

	if (map)
		for (j = 0; j < map_disks; j++) {
			map[j] = 0;
			if (j < info->array.raid_disks) {
				int i = find_phys(ddf, vc->conf.phys_refnum[j]);
				/* NOTE(review): 'i' is the phys-table
				 * index, not the raid-disk slot 'j';
				 * map[i] looks like it should be
				 * map[j] (and can exceed map_disks) -
				 * confirm before changing. */
				if (i >= 0 &&
				    (be16_to_cpu(ddf->phys->entries[i].state)
				     & DDF_Online) &&
				    !(be16_to_cpu(ddf->phys->entries[i].state)
				      & DDF_Failed))
					map[i] = 1;
			}
		}
}
2071
/* Apply a named metadata update to this superblock.
 * Most updates are stubs for DDF; returns 0 on success/no-op and -1
 * for updates DDF does not support. */
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made. For others, the return value is ignored.
	 * Update options are:
	 * force-one : This device looks a bit old but needs to be included,
	 * update age info appropriately.
	 * assemble: clear any 'faulty' flag to allow this device to
	 * be assembled.
	 * force-array: Array is degraded but being forced, mark it clean
	 * if that will be needed to assemble it.
	 *
	 * newdev: not used ????
	 * grow: Array has gained a new device - this is currently for
	 * linear only
	 * resync: mark as dirty so a resync will happen.
	 * uuid: Change the uuid of the array to match what is given
	 * homehost: update the recorded homehost
	 * name: update the name - preserving the homehost
	 * _reshape_progress: record new reshape_progress position.
	 *
	 * Following are not relevant for this version:
	 * sparc2.2 : update from old dodgey metadata
	 * super-minor: change the preferred_minor number
	 * summaries: update redundant counters.
	 */
	int rv = 0;
//	struct ddf_super *ddf = st->sb;
//	struct vd_config *vd = find_vdcr(ddf, info->container_member);
//	struct virtual_entry *ve = find_ve(ddf);

	/* we don't need to handle "force-*" or "assemble" as
	 * there is no need to 'trick' the kernel. We the metadata is
	 * first updated to activate the array, all the implied modifications
	 * will just happen.
	 */

	if (strcmp(update, "grow") == 0) {
		/* FIXME */
	} else if (strcmp(update, "resync") == 0) {
//		info->resync_checkpoint = 0;
	} else if (strcmp(update, "homehost") == 0) {
		/* homehost is stored in controller->vendor_data,
		 * or it is when we are the vendor
		 */
//		if (info->vendor_is_local)
//			strcpy(ddf->controller.vendor_data, homehost);
		rv = -1;
	} else if (strcmp(update, "name") == 0) {
		/* name is stored in virtual_entry->name */
//		memset(ve->name, ' ', 16);
//		strncpy(ve->name, info->name, 16);
		rv = -1;
	} else if (strcmp(update, "_reshape_progress") == 0) {
		/* We don't support reshape yet */
	} else if (strcmp(update, "assemble") == 0 ) {
		/* Do nothing, just succeed */
		rv = 0;
	} else
		rv = -1;

//	update_all_csum(ddf);

	return rv;
}
2140
/* Fill the 24-byte 'guid' with a freshly generated DDF header /
 * virtual-disk GUID: vendor id, fixed controller type, creation
 * timestamp and a random tail. */
static void make_header_guid(char *guid)
{
	be32 stamp;
	/* Create a DDF Header of Virtual Disk GUID */

	/* 24 bytes of fiction required.
	 * first 8 are a 'vendor-id' - "Linux-MD"
	 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
	 * Remaining 8 random number plus timestamp
	 */
	memcpy(guid, T10, sizeof(T10));
	stamp = cpu_to_be32(0xdeadbeef);
	memcpy(guid+8, &stamp, 4);
	stamp = cpu_to_be32(0);
	memcpy(guid+12, &stamp, 4);
	/* 1980-based timestamp, matching how print_guid decodes
	 * bytes 16-19. */
	stamp = cpu_to_be32(time(0) - DECADE);
	memcpy(guid+16, &stamp, 4);
	stamp._v32 = random32();
	memcpy(guid+20, &stamp, 4);
}
2161
2162 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2163 {
2164 unsigned int i;
2165 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2166 if (all_ff(ddf->virt->entries[i].guid))
2167 return i;
2168 }
2169 return DDF_NOTFOUND;
2170 }
2171
2172 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2173 const char *name)
2174 {
2175 unsigned int i;
2176 if (name == NULL)
2177 return DDF_NOTFOUND;
2178 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2179 if (all_ff(ddf->virt->entries[i].guid))
2180 continue;
2181 if (!strncmp(name, ddf->virt->entries[i].name,
2182 sizeof(ddf->virt->entries[i].name)))
2183 return i;
2184 }
2185 return DDF_NOTFOUND;
2186 }
2187
2188 #ifndef MDASSEMBLE
2189 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2190 const char *guid)
2191 {
2192 unsigned int i;
2193 if (guid == NULL || all_ff(guid))
2194 return DDF_NOTFOUND;
2195 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2196 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2197 return i;
2198 return DDF_NOTFOUND;
2199 }
2200 #endif
2201
2202 static int init_super_ddf_bvd(struct supertype *st,
2203 mdu_array_info_t *info,
2204 unsigned long long size,
2205 char *name, char *homehost,
2206 int *uuid, unsigned long long data_offset);
2207
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *                    this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *                         leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *                         This just lists VDs, doesn't give details.
	 *  Config records - describes the VDs that use this disk
	 *  DiskData - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure (message already printed).
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* If a superblock already exists we are creating a BVD inside an
	 * existing container - delegate to the BVD variant. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	ddf->dlist = NULL; /* no physical disks yet */
	ddf->conflist = NULL; /* No virtual disks yet */
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  So just set up the lengths here;
	 * the LBAs are filled in per-device at write time.
	 */

	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	/* all-ones LBAs mean "not yet placed"; fixed up per-device later */
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255;
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
	ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
	ddf->max_part = 64;
	ddf->mppe = 256;
	/* one sector of record header plus space for mppe refnums (4 bytes)
	 * and LBA offsets (8 bytes) */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* controller sections is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* DDF only permits these section sizes; any other value means the
	 * structure definitions changed incompatibly */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* one config record per partition, plus one for the spare record */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* BBM, diag space and vendor logs are left empty/absent */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* primary and secondary headers start as copies of the anchor */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id' - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	/* right-justify the hostname, space-fill the gap after "Linux-MD" */
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	/* 0xff-fill the whole section, zero only the fixed header part */
	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	/* all-0xff GUID marks an unused slot (see find_unused_pde) */
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	/* all-0xff entries mark unused slots (see find_unused_vde) */
	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
2429
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes to the DDF stripe-size exponent:
	 * log2 of the chunk size expressed in 512-byte sectors.
	 */
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
2434
2435 #ifndef MDASSEMBLE
/* A used region of a physical disk, in sectors. */
struct extent {
	unsigned long long start, size;
};
/* qsort comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *lhs = av;
	const struct extent *rhs = bv;

	if (lhs->start == rhs->start)
		return 0;
	return lhs->start < rhs->start ? -1 : 1;
}
2449
2450 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2451 {
2452 /* find a list of used extents on the give physical device
2453 * (dnum) of the given ddf.
2454 * Return a malloced array of 'struct extent'
2455
2456 * FIXME ignore DDF_Legacy devices?
2457
2458 */
2459 struct extent *rv;
2460 int n = 0;
2461 unsigned int i;
2462 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2463
2464 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2465 return NULL;
2466
2467 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2468
2469 for (i = 0; i < ddf->max_part; i++) {
2470 const struct vd_config *bvd;
2471 unsigned int ibvd;
2472 struct vcl *v = dl->vlist[i];
2473 if (v == NULL ||
2474 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2475 &bvd, &ibvd) == DDF_NOTFOUND)
2476 continue;
2477 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2478 rv[n].size = be64_to_cpu(bvd->blocks);
2479 n++;
2480 }
2481 qsort(rv, n, sizeof(*rv), cmp_extent);
2482
2483 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2484 rv[n].size = 0;
2485 return rv;
2486 }
2487 #endif
2488
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container,
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry.
	 * Returns 1 on success, 0 on failure (message already printed).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* name is a fixed-width, space-padded field; strncpy without forced
	 * NUL-termination is deliberate here */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	/* NOTE(review): from here on, failure paths free vcl but leave the
	 * virtual entry populated and populated_vdes incremented - looks
	 * like a leaked vde slot; confirm against ddf_process_update. */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* translate the md level/layout into DDF terms; also sanity-check
	 * that the per-BVD element count fits our fixed mppe allocation */
	if (layout_md2ddf(info, vc) == -1 ||
	    be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* per-disk size and total array size, both in 512-byte sectors */
	vc->blocks = cpu_to_be64(info->size * 2);
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnums start as 0xff (= unused slot); the LBA offsets that
	 * follow them start as zero */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* clone the config into each secondary BVD, fixing its sequence */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2606
2607
2608 #ifndef MDASSEMBLE
2609 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2610
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device with-in the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	/* locate the container device either by pre-chosen raid slot or
	 * by major:minor */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	/* only in-sync devices are added to the config */
	if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		/* for a secondary-level (e.g. RAID10-like) layout, select
		 * the BVD this disk belongs to and its index within it */
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* first-fit: walk the gaps between used extents looking for one of
	 * at least 'blocks' sectors; the list ends with a zero-size
	 * sentinel */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* record this VD in the first free slot of the disk's vlist */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* the disk is no longer a global spare - it is active in a VD */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf);
}
2705
2706 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2707 {
2708 unsigned int i;
2709 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2710 if (all_ff(ddf->phys->entries[i].guid))
2711 return i;
2712 }
2713 return DDF_NOTFOUND;
2714 }
2715
2716 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2717 {
2718 __u64 cfs, t;
2719 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2720 t = be64_to_cpu(dl->secondary_lba);
2721 if (t != ~(__u64)0)
2722 cfs = min(cfs, t);
2723 /*
2724 * Some vendor DDF structures interpret workspace_lba
2725 * very differently then us. Make a sanity check on the value.
2726 */
2727 t = be64_to_cpu(dl->workspace_lba);
2728 if (t < cfs) {
2729 __u64 wsp = cfs - t;
2730 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2731 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2732 __func__, dl->major, dl->minor, wsp);
2733 } else
2734 cfs = t;
2735 }
2736 pde->config_size = cpu_to_be64(cfs);
2737 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2738 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2739 }
2740
2741 /* add a device to a container, either while creating it or while
2742 * expanding a pre-existing container
2743 */
/* add a device to a container, either while creating it or while
 * expanding a pre-existing container
 */
static int add_to_super_ddf(struct supertype *st,
			    mdu_disk_info_t *dk, int fd, char *devname,
			    unsigned long long data_offset)
{
	/* Create a phys_disk entry and a disk_data record for the device
	 * identified by fd/devname, and link it into the container's dlist
	 * (or add_list when running under mdmon via st->update_tail).
	 * When a BVD is being created (ddf->currentconf set), delegate to
	 * add_to_super_ddf_bvd instead.
	 * Returns 0 on success, 1 on failure.
	 */
	struct ddf_super *ddf = st->sb;
	struct dl *dd;
	time_t now;
	struct tm *tm;
	unsigned long long size;
	struct phys_disk_entry *pde;
	unsigned int n, i;
	struct stat stb;
	__u32 *tptr;

	if (ddf->currentconf) {
		add_to_super_ddf_bvd(st, dk, fd, devname);
		return 0;
	}

	/* This is device numbered dk->number. We need to create
	 * a phys_disk entry and a more detailed disk_data entry.
	 */
	fstat(fd, &stb);
	n = find_unused_pde(ddf);
	if (n == DDF_NOTFOUND) {
		pr_err("%s: No free slot in array, cannot add disk\n",
		       __func__);
		return 1;
	}
	pde = &ddf->phys->entries[n];
	get_dev_size(fd, NULL, &size);
	if (size <= 32*1024*1024) {
		/* the DDF structures alone need 32MB */
		pr_err("%s: device size must be at least 32MB\n",
		       __func__);
		return 1;
	}
	size >>= 9;	/* bytes -> 512-byte sectors */

	if (posix_memalign((void**)&dd, 512,
		           sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
		pr_err("%s could allocate buffer for new disk, aborting\n",
		       __func__);
		return 1;
	}
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->devname = devname;
	dd->fd = fd;
	dd->spare = NULL;

	dd->disk.magic = DDF_PHYS_DATA_MAGIC;
	/* disk GUID: vendor id + creation date + 8 random bytes */
	now = time(0);
	tm = localtime(&now);
	sprintf(dd->disk.guid, "%8s%04d%02d%02d",
		T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
	tptr = (__u32 *)(dd->disk.guid + 16);
	*tptr++ = random32();
	*tptr = random32();

	do {
		/* Cannot be bothered finding a CRC of some irrelevant details*/
		dd->disk.refnum._v32 = random32();
		/* retry until the refnum collides with no existing entry */
		for (i = be16_to_cpu(ddf->active->max_pd_entries);
		     i > 0; i--)
			if (be32_eq(ddf->phys->entries[i-1].refnum,
				    dd->disk.refnum))
				break;
	} while (i > 0);

	dd->disk.forced_ref = 1;
	dd->disk.forced_guid = 1;
	memset(dd->disk.vendor, ' ', 32);
	memcpy(dd->disk.vendor, "Linux", 5);
	memset(dd->disk.pad, 0xff, 442);
	for (i = 0; i < ddf->max_part ; i++)
		dd->vlist[i] = NULL;

	dd->pdnum = n;

	if (st->update_tail) {
		/* running under mdmon: build a one-entry phys_disk update
		 * that ddf_process_update will apply later; used_pdes here
		 * carries the slot number, not a count */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(n);
		pde = &pd->entries[0];
		dd->mdupdate = pd;
	} else
		ddf->phys->used_pdes = cpu_to_be16(
			1 + be16_to_cpu(ddf->phys->used_pdes));

	memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
	pde->refnum = dd->disk.refnum;
	pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
	pde->state = cpu_to_be16(DDF_Online);
	dd->size = size;
	/*
	 * If there is already a device in dlist, try to reserve the same
	 * amount of workspace. Otherwise, use 32MB.
	 * We checked disk size above already.
	 */
#define __calc_lba(new, old, lba, mb) do { \
		unsigned long long dif; \
		if ((old) != NULL) \
			dif = (old)->size - be64_to_cpu((old)->lba); \
		else \
			dif = (new)->size; \
		if ((new)->size > dif) \
			(new)->lba = cpu_to_be64((new)->size - dif); \
		else \
			(new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
	} while (0)
	__calc_lba(dd, ddf->dlist, workspace_lba, 32);
	__calc_lba(dd, ddf->dlist, primary_lba, 16);
	/* only place a secondary header if the existing disks have one */
	if (ddf->dlist == NULL ||
	    be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
		__calc_lba(dd, ddf->dlist, secondary_lba, 32);
	_set_config_size(pde, dd);

	sprintf(pde->path, "%17.17s","Information: nil") ;
	memset(pde->pad, 0xff, 6);

	if (st->update_tail) {
		/* defer: write_init_super_ddf will queue dd->mdupdate */
		dd->next = ddf->add_list;
		ddf->add_list = dd;
	} else {
		dd->next = ddf->dlist;
		ddf->dlist = dd;
		ddf_set_updates_pending(ddf);
	}

	return 0;
}
2879
2880 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2881 {
2882 struct ddf_super *ddf = st->sb;
2883 struct dl *dl;
2884
2885 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2886 * disappeared from the container.
2887 * We need to arrange that it disappears from the metadata and
2888 * internal data structures too.
2889 * Most of the work is done by ddf_process_update which edits
2890 * the metadata and closes the file handle and attaches the memory
2891 * where free_updates will free it.
2892 */
2893 for (dl = ddf->dlist; dl ; dl = dl->next)
2894 if (dl->major == dk->major &&
2895 dl->minor == dk->minor)
2896 break;
2897 if (!dl)
2898 return -1;
2899
2900 if (st->update_tail) {
2901 int len = (sizeof(struct phys_disk) +
2902 sizeof(struct phys_disk_entry));
2903 struct phys_disk *pd;
2904
2905 pd = xmalloc(len);
2906 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2907 pd->used_pdes = cpu_to_be16(dl->pdnum);
2908 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2909 append_metadata_update(st, pd, len);
2910 }
2911 return 0;
2912 }
2913 #endif
2914
2915 /*
2916 * This is the write_init_super method for a ddf container. It is
2917 * called when creating a container or adding another device to a
2918 * container.
2919 */
2920 #define NULL_CONF_SZ 4096
2921
2922 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2923 {
2924 unsigned long long sector;
2925 struct ddf_header *header;
2926 int fd, i, n_config, conf_size, buf_size;
2927 int ret = 0;
2928 char *conf;
2929
2930 fd = d->fd;
2931
2932 switch (type) {
2933 case DDF_HEADER_PRIMARY:
2934 header = &ddf->primary;
2935 sector = be64_to_cpu(header->primary_lba);
2936 break;
2937 case DDF_HEADER_SECONDARY:
2938 header = &ddf->secondary;
2939 sector = be64_to_cpu(header->secondary_lba);
2940 break;
2941 default:
2942 return 0;
2943 }
2944 if (sector == ~(__u64)0)
2945 return 0;
2946
2947 header->type = type;
2948 header->openflag = 1;
2949 header->crc = calc_crc(header, 512);
2950
2951 lseek64(fd, sector<<9, 0);
2952 if (write(fd, header, 512) < 0)
2953 goto out;
2954
2955 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2956 if (write(fd, &ddf->controller, 512) < 0)
2957 goto out;
2958
2959 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2960 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2961 goto out;
2962 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2963 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2964 goto out;
2965
2966 /* Now write lots of config records. */
2967 n_config = ddf->max_part;
2968 conf_size = ddf->conf_rec_len * 512;
2969 conf = ddf->conf;
2970 buf_size = conf_size * (n_config + 1);
2971 if (!conf) {
2972 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2973 goto out;
2974 ddf->conf = conf;
2975 }
2976 for (i = 0 ; i <= n_config ; i++) {
2977 struct vcl *c;
2978 struct vd_config *vdc = NULL;
2979 if (i == n_config) {
2980 c = (struct vcl *)d->spare;
2981 if (c)
2982 vdc = &c->conf;
2983 } else {
2984 unsigned int dummy;
2985 c = d->vlist[i];
2986 if (c)
2987 get_pd_index_from_refnum(
2988 c, d->disk.refnum,
2989 ddf->mppe,
2990 (const struct vd_config **)&vdc,
2991 &dummy);
2992 }
2993 if (c) {
2994 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2995 i, be32_to_cpu(d->disk.refnum),
2996 guid_str(vdc->guid),
2997 vdc->sec_elmnt_seq);
2998 vdc->seqnum = header->seq;
2999 vdc->crc = calc_crc(vdc, conf_size);
3000 memcpy(conf + i*conf_size, vdc, conf_size);
3001 } else
3002 memset(conf + i*conf_size, 0xff, conf_size);
3003 }
3004 if (write(fd, conf, buf_size) != buf_size)
3005 goto out;
3006
3007 d->disk.crc = calc_crc(&d->disk, 512);
3008 if (write(fd, &d->disk, 512) < 0)
3009 goto out;
3010
3011 ret = 1;
3012 out:
3013 header->openflag = 0;
3014 header->crc = calc_crc(header, 512);
3015
3016 lseek64(fd, sector<<9, 0);
3017 if (write(fd, header, 512) < 0)
3018 ret = 0;
3019
3020 return ret;
3021 }
3022
static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
{
	/* Write the full DDF metadata (primary copy, secondary copy if
	 * present, and the anchor in the device's last sector) to one
	 * member device.  Returns 1 on success, 0 on failure or when the
	 * device has no open fd.
	 */
	unsigned long long size;
	int fd = d->fd;
	if (fd < 0)
		return 0;

	/* We need to fill in the primary, (secondary) and workspace
	 * lba's in the headers, set their checksums,
	 * Also checksum phys, virt....
	 *
	 * Then write everything out, finally the anchor is written.
	 */
	get_dev_size(fd, NULL, &size);
	size /= 512;	/* work in 512-byte sectors from here on */
	/* keep LBAs loaded from existing metadata; otherwise place the
	 * structures in the reserved 32MiB at the end of the device */
	if (be64_to_cpu(d->workspace_lba) != 0ULL)
		ddf->anchor.workspace_lba = d->workspace_lba;
	else
		ddf->anchor.workspace_lba =
			cpu_to_be64(size - 32*1024*2);
	if (be64_to_cpu(d->primary_lba) != 0ULL)
		ddf->anchor.primary_lba = d->primary_lba;
	else
		ddf->anchor.primary_lba =
			cpu_to_be64(size - 16*1024*2);
	if (be64_to_cpu(d->secondary_lba) != 0ULL)
		ddf->anchor.secondary_lba = d->secondary_lba;
	else
		ddf->anchor.secondary_lba =
			cpu_to_be64(size - 32*1024*2);
	ddf->anchor.seq = ddf->active->seq;
	/* primary and secondary headers mirror the anchor except for the
	 * open flag / sequence tweaks below */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
	ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
	ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
		return 0;

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
		return 0;

	/* the anchor lives in the very last sector of the device */
	/* NOTE(review): a short write here would be counted as success -
	 * same issue __write_ddf_structure has; confirm and tighten. */
	lseek64(fd, (size-1)*512, SEEK_SET);
	if (write(fd, &ddf->anchor, 512) < 0)
		return 0;

	return 1;
}
3073
3074 #ifndef MDASSEMBLE
3075 static int __write_init_super_ddf(struct supertype *st)
3076 {
3077 struct ddf_super *ddf = st->sb;
3078 struct dl *d;
3079 int attempts = 0;
3080 int successes = 0;
3081
3082 pr_state(ddf, __func__);
3083
3084 /* try to write updated metadata,
3085 * if we catch a failure move on to the next disk
3086 */
3087 for (d = ddf->dlist; d; d=d->next) {
3088 attempts++;
3089 successes += _write_super_to_disk(ddf, d);
3090 }
3091
3092 return attempts != successes;
3093 }
3094
3095 static int write_init_super_ddf(struct supertype *st)
3096 {
3097 struct ddf_super *ddf = st->sb;
3098 struct vcl *currentconf = ddf->currentconf;
3099
3100 /* we are done with currentconf reset it to point st at the container */
3101 ddf->currentconf = NULL;
3102
3103 if (st->update_tail) {
3104 /* queue the virtual_disk and vd_config as metadata updates */
3105 struct virtual_disk *vd;
3106 struct vd_config *vc;
3107 int len, tlen;
3108 unsigned int i;
3109
3110 if (!currentconf) {
3111 int len = (sizeof(struct phys_disk) +
3112 sizeof(struct phys_disk_entry));
3113
3114 /* adding a disk to the container. */
3115 if (!ddf->add_list)
3116 return 0;
3117
3118 append_metadata_update(st, ddf->add_list->mdupdate, len);
3119 ddf->add_list->mdupdate = NULL;
3120 return 0;
3121 }
3122
3123 /* Newly created VD */
3124
3125 /* First the virtual disk. We have a slightly fake header */
3126 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3127 vd = xmalloc(len);
3128 *vd = *ddf->virt;
3129 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3130 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3131 append_metadata_update(st, vd, len);
3132
3133 /* Then the vd_config */
3134 len = ddf->conf_rec_len * 512;
3135 tlen = len * currentconf->conf.sec_elmnt_count;
3136 vc = xmalloc(tlen);
3137 memcpy(vc, &currentconf->conf, len);
3138 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3139 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3140 len);
3141 append_metadata_update(st, vc, tlen);
3142
3143 /* FIXME I need to close the fds! */
3144 return 0;
3145 } else {
3146 struct dl *d;
3147 if (!currentconf)
3148 for (d = ddf->dlist; d; d=d->next)
3149 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3150 return __write_init_super_ddf(st);
3151 }
3152 }
3153
3154 #endif
3155
static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
			    unsigned long long data_offset)
{
	/* The final 32MiB (65536 sectors) of every device is reserved for
	 * the DDF structures; only what precedes it is available for data.
	 */
	const __u64 reserved = 32*1024*2;

	if (devsize <= reserved)
		return 0;
	return devsize - reserved;
}
3164
3165 #ifndef MDASSEMBLE
3166
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them (in dl->raiddisk / dl->esize).
	 * If they cannot be found, fail (return 0); otherwise return 1
	 * with *freesize possibly updated (when size was 0).
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* reset any selection from a previous attempt */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		/* size==0 means "as large as possible"; require at least
		 * one chunk so the device isn't uselessly small */
		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* walk the gaps between used extents (list ends with a
		 * zero-size sentinel); minsize grows to track the largest
		 * qualifying gap seen so far */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* not enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		if (chunk) {
			/* round down to a whole number of chunks */
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3253
3254 static int
3255 validate_geometry_ddf_container(struct supertype *st,
3256 int level, int layout, int raiddisks,
3257 int chunk, unsigned long long size,
3258 unsigned long long data_offset,
3259 char *dev, unsigned long long *freesize,
3260 int verbose);
3261
3262 static int validate_geometry_ddf_bvd(struct supertype *st,
3263 int level, int layout, int raiddisks,
3264 int *chunk, unsigned long long size,
3265 unsigned long long data_offset,
3266 char *dev, unsigned long long *freesize,
3267 int verbose);
3268
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	/* NOTE(review): 'chunk' is dereferenced unconditionally —
	 * assumes callers always pass a valid pointer; confirm. */
	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	/* presumably -1000000 is a caller-side "level unset" marker
	 * that defaults to container creation — TODO confirm */
	if (level == -1000000) level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, *chunk,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device given: only check whether this geometry is
		 * expressible in DDF, and (inside a container) whether
		 * enough free space exists. */
		mdu_array_info_t array = {
			.level = level, .layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, *chunk, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there.  Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* Device is free (not busy): it could only become a
		 * container member, which we do not create here. */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {

			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		/* NOTE(review): 'sra' from sysfs_read() is not freed on
		 * this path — looks like a leak; check against
		 * sysfs_free() usage elsewhere in mdadm. */
		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container.  Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		return 0;

	/* NOTE(review): on the 'else' path above cfd is never closed,
	 * and 'sra' is not freed on any path here — possible leaks. */
	return 1;
}
3402
3403 static int
3404 validate_geometry_ddf_container(struct supertype *st,
3405 int level, int layout, int raiddisks,
3406 int chunk, unsigned long long size,
3407 unsigned long long data_offset,
3408 char *dev, unsigned long long *freesize,
3409 int verbose)
3410 {
3411 int fd;
3412 unsigned long long ldsize;
3413
3414 if (level != LEVEL_CONTAINER)
3415 return 0;
3416 if (!dev)
3417 return 1;
3418
3419 fd = open(dev, O_RDONLY|O_EXCL, 0);
3420 if (fd < 0) {
3421 if (verbose)
3422 pr_err("ddf: Cannot open %s: %s\n",
3423 dev, strerror(errno));
3424 return 0;
3425 }
3426 if (!get_dev_size(fd, dev, &ldsize)) {
3427 close(fd);
3428 return 0;
3429 }
3430 close(fd);
3431
3432 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3433 if (*freesize == 0)
3434 return 0;
3435
3436 return 1;
3437 }
3438
3439 static int validate_geometry_ddf_bvd(struct supertype *st,
3440 int level, int layout, int raiddisks,
3441 int *chunk, unsigned long long size,
3442 unsigned long long data_offset,
3443 char *dev, unsigned long long *freesize,
3444 int verbose)
3445 {
3446 struct stat stb;
3447 struct ddf_super *ddf = st->sb;
3448 struct dl *dl;
3449 unsigned long long pos = 0;
3450 unsigned long long maxsize;
3451 struct extent *e;
3452 int i;
3453 /* ddf/bvd supports lots of things, but not containers */
3454 if (level == LEVEL_CONTAINER) {
3455 if (verbose)
3456 pr_err("DDF cannot create a container within an container\n");
3457 return 0;
3458 }
3459 /* We must have the container info already read in. */
3460 if (!ddf)
3461 return 0;
3462
3463 if (!dev) {
3464 /* General test: make sure there is space for
3465 * 'raiddisks' device extents of size 'size'.
3466 */
3467 unsigned long long minsize = size;
3468 int dcnt = 0;
3469 if (minsize == 0)
3470 minsize = 8;
3471 for (dl = ddf->dlist; dl ; dl = dl->next)
3472 {
3473 int found = 0;
3474 pos = 0;
3475
3476 i = 0;
3477 e = get_extents(ddf, dl);
3478 if (!e) continue;
3479 do {
3480 unsigned long long esize;
3481 esize = e[i].start - pos;
3482 if (esize >= minsize)
3483 found = 1;
3484 pos = e[i].start + e[i].size;
3485 i++;
3486 } while (e[i-1].size);
3487 if (found)
3488 dcnt++;
3489 free(e);
3490 }
3491 if (dcnt < raiddisks) {
3492 if (verbose)
3493 pr_err("ddf: Not enough devices with "
3494 "space for this array (%d < %d)\n",
3495 dcnt, raiddisks);
3496 return 0;
3497 }
3498 return 1;
3499 }
3500 /* This device must be a member of the set */
3501 if (stat(dev, &stb) < 0)
3502 return 0;
3503 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3504 return 0;
3505 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3506 if (dl->major == (int)major(stb.st_rdev) &&
3507 dl->minor == (int)minor(stb.st_rdev))
3508 break;
3509 }
3510 if (!dl) {
3511 if (verbose)
3512 pr_err("ddf: %s is not in the "
3513 "same DDF set\n",
3514 dev);
3515 return 0;
3516 }
3517 e = get_extents(ddf, dl);
3518 maxsize = 0;
3519 i = 0;
3520 if (e) do {
3521 unsigned long long esize;
3522 esize = e[i].start - pos;
3523 if (esize >= maxsize)
3524 maxsize = esize;
3525 pos = e[i].start + e[i].size;
3526 i++;
3527 } while (e[i-1].size);
3528 *freesize = maxsize;
3529 // FIXME here I am
3530
3531 return 1;
3532 }
3533
static int load_super_ddf_all(struct supertype *st, int fd,
			      void **sbp, char *devname)
{
	/* Load the DDF metadata for the container open on 'fd':
	 * probe every member, pick the one with the newest sequence
	 * number as the authoritative copy, load the global records
	 * from it, then load the device-local records from every
	 * member.  On success *sbp receives the new ddf_super and
	 * 0 is returned; non-zero means failure.
	 *
	 * NOTE(review): the error returns below leak 'super' (and
	 * 'sra' from sysfs_read() is never freed on any path) —
	 * confirm whether callers treat failure as fatal.
	 */
	struct mdinfo *sra;
	struct ddf_super *super;
	struct mdinfo *sd, *best = NULL;
	int bestseq = 0;
	int seq;
	char nm[20];
	int dfd;

	sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;
	/* Only a ddf container is acceptable here. */
	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "ddf") != 0)
		return 1;

	if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
		return 1;
	memset(super, 0, sizeof(*super));

	/* first, try each device, and choose the best ddf */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDONLY);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		close(dfd);
		if (rv == 0) {
			seq = be32_to_cpu(super->active->seq);
			/* an open (in-flight) header counts as one older */
			if (super->active->openflag)
				seq--;
			if (!best || seq > bestseq) {
				bestseq = seq;
				best = sd;
			}
		}
	}
	if (!best)
		return 1;
	/* OK, load this ddf */
	sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
	dfd = dev_open(nm, O_RDONLY);
	if (dfd < 0)
		return 1;
	load_ddf_headers(dfd, super, NULL);
	load_ddf_global(dfd, super, NULL);
	close(dfd);
	/* Now we need the device-local bits */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;

		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDWR);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		if (rv == 0)
			/* last arg 1: presumably "keep fd" so the disk
			 * list retains dfd — TODO confirm; that would
			 * explain why dfd is not closed here */
			rv = load_ddf_local(dfd, super, NULL, 1);
		if (rv)
			return 1;
	}

	*sbp = super;
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	strcpy(st->container_devnm, fd2devnm(fd));
	return 0;
}
3610
static int load_container_ddf(struct supertype *st, int fd,
			      char *devname)
{
	/* Thin wrapper: load all DDF metadata from the container open
	 * on 'fd' directly into st->sb.  Returns 0 on success. */
	return load_super_ddf_all(st, fd, &st->sb, devname);
}
3616
3617 #endif /* MDASSEMBLE */
3618
3619 static int check_secondary(const struct vcl *vc)
3620 {
3621 const struct vd_config *conf = &vc->conf;
3622 int i;
3623
3624 /* The only DDF secondary RAID level md can support is
3625 * RAID 10, if the stripe sizes and Basic volume sizes
3626 * are all equal.
3627 * Other configurations could in theory be supported by exposing
3628 * the BVDs to user space and using device mapper for the secondary
3629 * mapping. So far we don't support that.
3630 */
3631
3632 __u64 sec_elements[4] = {0, 0, 0, 0};
3633 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3634 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3635
3636 if (vc->other_bvds == NULL) {
3637 pr_err("No BVDs for secondary RAID found\n");
3638 return -1;
3639 }
3640 if (conf->prl != DDF_RAID1) {
3641 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3642 return -1;
3643 }
3644 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3645 pr_err("Secondary RAID level %d is unsupported\n",
3646 conf->srl);
3647 return -1;
3648 }
3649 __set_sec_seen(conf->sec_elmnt_seq);
3650 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3651 const struct vd_config *bvd = vc->other_bvds[i];
3652 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3653 continue;
3654 if (bvd->srl != conf->srl) {
3655 pr_err("Inconsistent secondary RAID level across BVDs\n");
3656 return -1;
3657 }
3658 if (bvd->prl != conf->prl) {
3659 pr_err("Different RAID levels for BVDs are unsupported\n");
3660 return -1;
3661 }
3662 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3663 pr_err("All BVDs must have the same number of primary elements\n");
3664 return -1;
3665 }
3666 if (bvd->chunk_shift != conf->chunk_shift) {
3667 pr_err("Different strip sizes for BVDs are unsupported\n");
3668 return -1;
3669 }
3670 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3671 pr_err("Different BVD sizes are unsupported\n");
3672 return -1;
3673 }
3674 __set_sec_seen(bvd->sec_elmnt_seq);
3675 }
3676 for (i = 0; i < conf->sec_elmnt_count; i++) {
3677 if (!__was_sec_seen(i)) {
3678 pr_err("BVD %d is missing\n", i);
3679 return -1;
3680 }
3681 }
3682 return 0;
3683 }
3684
3685 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3686 be32 refnum, unsigned int nmax,
3687 const struct vd_config **bvd,
3688 unsigned int *idx)
3689 {
3690 unsigned int i, j, n, sec, cnt;
3691
3692 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3693 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3694
3695 for (i = 0, j = 0 ; i < nmax ; i++) {
3696 /* j counts valid entries for this BVD */
3697 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3698 j++;
3699 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3700 *bvd = &vc->conf;
3701 *idx = i;
3702 return sec * cnt + j - 1;
3703 }
3704 }
3705 if (vc->other_bvds == NULL)
3706 goto bad;
3707
3708 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3709 struct vd_config *vd = vc->other_bvds[n-1];
3710 sec = vd->sec_elmnt_seq;
3711 if (sec == DDF_UNUSED_BVD)
3712 continue;
3713 for (i = 0, j = 0 ; i < nmax ; i++) {
3714 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3715 j++;
3716 if (be32_eq(vd->phys_refnum[i], refnum)) {
3717 *bvd = vd;
3718 *idx = i;
3719 return sec * cnt + j - 1;
3720 }
3721 }
3722 }
3723 bad:
3724 *bvd = NULL;
3725 return DDF_NOTFOUND;
3726 }
3727
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 *
	 * If 'subarray' is non-NULL, only the virtual disk whose
	 * decimal number matches it is reported.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next)
	{
		unsigned int i;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Secondary RAID (multiple BVDs) is only reported in
		 * the configurations check_secondary() accepts. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		/* skip layouts md cannot express */
		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
		/* ctime is taken from bytes 16..19 of the VD GUID;
		 * DDF timestamps are 1980-based (see DECADE above) */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			/* dirty or not fully initialised: resync needed */
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		_ddf_array_name(this->name, ddf, i);
		memset(this->uuid, 0, sizeof(this->uuid));
		/* component_size is in sectors; array.size in KiB */
		this->component_size = be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() reads currentconf; restore it
		 * afterwards unless a specific subarray was requested */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused phys-disk entry */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			/* only cleanly online disks are reported */
			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->primary.seq);
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3853
3854 static int store_super_ddf(struct supertype *st, int fd)
3855 {
3856 struct ddf_super *ddf = st->sb;
3857 unsigned long long dsize;
3858 void *buf;
3859 int rc;
3860
3861 if (!ddf)
3862 return 1;
3863
3864 if (!get_dev_size(fd, NULL, &dsize))
3865 return 1;
3866
3867 if (ddf->dlist || ddf->conflist) {
3868 struct stat sta;
3869 struct dl *dl;
3870 int ofd, ret;
3871
3872 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3873 pr_err("%s: file descriptor for invalid device\n",
3874 __func__);
3875 return 1;
3876 }
3877 for (dl = ddf->dlist; dl; dl = dl->next)
3878 if (dl->major == (int)major(sta.st_rdev) &&
3879 dl->minor == (int)minor(sta.st_rdev))
3880 break;
3881 if (!dl) {
3882 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3883 (int)major(sta.st_rdev),
3884 (int)minor(sta.st_rdev));
3885 return 1;
3886 }
3887 ofd = dl->fd;
3888 dl->fd = fd;
3889 ret = (_write_super_to_disk(ddf, dl) != 1);
3890 dl->fd = ofd;
3891 return ret;
3892 }
3893
3894 if (posix_memalign(&buf, 512, 512) != 0)
3895 return 1;
3896 memset(buf, 0, 512);
3897
3898 lseek64(fd, dsize-512, 0);
3899 rc = write(fd, buf, 512);
3900 free(buf);
3901 if (rc < 0)
3902 return 1;
3903 return 0;
3904 }
3905
3906 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3907 {
3908 /*
3909 * return:
3910 * 0 same, or first was empty, and second was copied
3911 * 1 second had wrong number
3912 * 2 wrong uuid
3913 * 3 wrong other info
3914 */
3915 struct ddf_super *first = st->sb;
3916 struct ddf_super *second = tst->sb;
3917 struct dl *dl1, *dl2;
3918 struct vcl *vl1, *vl2;
3919 unsigned int max_vds, max_pds, pd, vd;
3920
3921 if (!first) {
3922 st->sb = tst->sb;
3923 tst->sb = NULL;
3924 return 0;
3925 }
3926
3927 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3928 return 2;
3929
3930 if (!be32_eq(first->active->seq, second->active->seq)) {
3931 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3932 be32_to_cpu(first->active->seq),
3933 be32_to_cpu(second->active->seq));
3934 return 3;
3935 }
3936 if (first->max_part != second->max_part ||
3937 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3938 !be16_eq(first->virt->populated_vdes,
3939 second->virt->populated_vdes)) {
3940 dprintf("%s: PD/VD number mismatch\n", __func__);
3941 return 3;
3942 }
3943
3944 max_pds = be16_to_cpu(first->phys->used_pdes);
3945 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3946 for (pd = 0; pd < max_pds; pd++)
3947 if (be32_eq(first->phys->entries[pd].refnum,
3948 dl2->disk.refnum))
3949 break;
3950 if (pd == max_pds) {
3951 dprintf("%s: no match for disk %08x\n", __func__,
3952 be32_to_cpu(dl2->disk.refnum));
3953 return 3;
3954 }
3955 }
3956
3957 max_vds = be16_to_cpu(first->active->max_vd_entries);
3958 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3959 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3960 continue;
3961 for (vd = 0; vd < max_vds; vd++)
3962 if (!memcmp(first->virt->entries[vd].guid,
3963 vl2->conf.guid, DDF_GUID_LEN))
3964 break;
3965 if (vd == max_vds) {
3966 dprintf("%s: no match for VD config\n", __func__);
3967 return 3;
3968 }
3969 }
3970 /* FIXME should I look at anything else? */
3971
3972 /*
3973 At this point we are fairly sure that the meta data matches.
3974 But the new disk may contain additional local data.
3975 Add it to the super block.
3976 */
3977 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3978 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3979 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3980 DDF_GUID_LEN))
3981 break;
3982 if (vl1) {
3983 if (vl1->other_bvds != NULL &&
3984 vl1->conf.sec_elmnt_seq !=
3985 vl2->conf.sec_elmnt_seq) {
3986 dprintf("%s: adding BVD %u\n", __func__,
3987 vl2->conf.sec_elmnt_seq);
3988 add_other_bvd(vl1, &vl2->conf,
3989 first->conf_rec_len*512);
3990 }
3991 continue;
3992 }
3993
3994 if (posix_memalign((void **)&vl1, 512,
3995 (first->conf_rec_len*512 +
3996 offsetof(struct vcl, conf))) != 0) {
3997 pr_err("%s could not allocate vcl buf\n",
3998 __func__);
3999 return 3;
4000 }
4001
4002 vl1->next = first->conflist;
4003 vl1->block_sizes = NULL;
4004 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4005 if (alloc_other_bvds(first, vl1) != 0) {
4006 pr_err("%s could not allocate other bvds\n",
4007 __func__);
4008 free(vl1);
4009 return 3;
4010 }
4011 for (vd = 0; vd < max_vds; vd++)
4012 if (!memcmp(first->virt->entries[vd].guid,
4013 vl1->conf.guid, DDF_GUID_LEN))
4014 break;
4015 vl1->vcnum = vd;
4016 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4017 first->conflist = vl1;
4018 }
4019
4020 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4021 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4022 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4023 break;
4024 if (dl1)
4025 continue;
4026
4027 if (posix_memalign((void **)&dl1, 512,
4028 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4029 != 0) {
4030 pr_err("%s could not allocate disk info buffer\n",
4031 __func__);
4032 return 3;
4033 }
4034 memcpy(dl1, dl2, sizeof(*dl1));
4035 dl1->mdupdate = NULL;
4036 dl1->next = first->dlist;
4037 dl1->fd = -1;
4038 for (pd = 0; pd < max_pds; pd++)
4039 if (be32_eq(first->phys->entries[pd].refnum,
4040 dl1->disk.refnum))
4041 break;
4042 dl1->pdnum = pd;
4043 if (dl2->spare) {
4044 if (posix_memalign((void **)&dl1->spare, 512,
4045 first->conf_rec_len*512) != 0) {
4046 pr_err("%s could not allocate spare info buf\n",
4047 __func__);
4048 return 3;
4049 }
4050 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4051 }
4052 for (vd = 0 ; vd < first->max_part ; vd++) {
4053 if (!dl2->vlist[vd]) {
4054 dl1->vlist[vd] = NULL;
4055 continue;
4056 }
4057 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4058 if (!memcmp(vl1->conf.guid,
4059 dl2->vlist[vd]->conf.guid,
4060 DDF_GUID_LEN))
4061 break;
4062 dl1->vlist[vd] = vl1;
4063 }
4064 }
4065 first->dlist = dl1;
4066 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4067 be32_to_cpu(dl1->disk.refnum));
4068 }
4069
4070 return 0;
4071 }
4072
4073 #ifndef MDASSEMBLE
4074 /*
4075 * A new array 'a' has been started which claims to be instance 'inst'
4076 * within container 'c'.
4077 * We need to confirm that the array matches the metadata in 'c' so
4078 * that we don't corrupt any metadata.
4079 */
static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
{
	/* Confirm that subarray 'inst' of container 'c' exists and that
	 * each of array 'a's devices is known to the metadata; devices
	 * that are not cleanly online are immediately marked faulty in
	 * the kernel.  Returns 0 on success, negative on error. */
	struct ddf_super *ddf = c->sb;
	int n = atoi(inst);
	struct mdinfo *dev;
	struct dl *dl;
	static const char faulty[] = "faulty";

	/* an all-0xff GUID marks an unused virtual-disk entry */
	if (all_ff(ddf->virt->entries[n].guid)) {
		pr_err("%s: subarray %d doesn't exist\n", __func__, n);
		return -ENODEV;
	}
	dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
		guid_str(ddf->virt->entries[n].guid));
	for (dev = a->info.devs; dev; dev = dev->next) {
		/* every array device must appear in the container's
		 * disk list */
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (dl->major == dev->disk.major &&
			    dl->minor == dev->disk.minor)
				break;
		if (!dl) {
			pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
			       __func__, dev->disk.major, dev->disk.minor, n);
			return -1;
		}
		if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
			(DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
			/* not cleanly online: tell the kernel to fail
			 * this device right away via its state file */
			pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
			       __func__, n, dl->major, dl->minor,
			       be16_to_cpu(
				       ddf->phys->entries[dl->pdnum].state));
			if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
			    sizeof(faulty) - 1)
				pr_err("Write to state_fd failed\n");
			dev->curr_state = DS_FAULTY;
		}
	}
	a->info.container_member = n;
	return 0;
}
4119
4120 /*
4121 * The array 'a' is to be marked clean in the metadata.
4122 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4123 * clean up to the point (in sectors). If that cannot be recorded in the
4124 * metadata, then leave it as dirty.
4125 *
4126 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4127 * !global! virtual_disk.virtual_entry structure.
4128 */
4129 static int ddf_set_array_state(struct active_array *a, int consistent)
4130 {
4131 struct ddf_super *ddf = a->container->sb;
4132 int inst = a->info.container_member;
4133 int old = ddf->virt->entries[inst].state;
4134 if (consistent == 2) {
4135 /* Should check if a recovery should be started FIXME */
4136 consistent = 1;
4137 if (!is_resync_complete(&a->info))
4138 consistent = 0;
4139 }
4140 if (consistent)
4141 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4142 else
4143 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4144 if (old != ddf->virt->entries[inst].state)
4145 ddf_set_updates_pending(ddf);
4146
4147 old = ddf->virt->entries[inst].init_state;
4148 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4149 if (is_resync_complete(&a->info))
4150 ddf->virt->entries[inst].init_state |= DDF_init_full;
4151 else if (a->info.resync_start == 0)
4152 ddf->virt->entries[inst].init_state |= DDF_init_not;
4153 else
4154 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4155 if (old != ddf->virt->entries[inst].init_state)
4156 ddf_set_updates_pending(ddf);
4157
4158 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4159 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4160 consistent?"clean":"dirty",
4161 a->info.resync_start);
4162 return consistent;
4163 }
4164
4165 static int get_bvd_state(const struct ddf_super *ddf,
4166 const struct vd_config *vc)
4167 {
4168 unsigned int i, n_bvd, working = 0;
4169 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4170 int pd, st, state;
4171 for (i = 0; i < n_prim; i++) {
4172 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4173 continue;
4174 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4175 if (pd < 0)
4176 continue;
4177 st = be16_to_cpu(ddf->phys->entries[pd].state);
4178 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4179 == DDF_Online)
4180 working++;
4181 }
4182
4183 state = DDF_state_degraded;
4184 if (working == n_prim)
4185 state = DDF_state_optimal;
4186 else
4187 switch (vc->prl) {
4188 case DDF_RAID0:
4189 case DDF_CONCAT:
4190 case DDF_JBOD:
4191 state = DDF_state_failed;
4192 break;
4193 case DDF_RAID1:
4194 if (working == 0)
4195 state = DDF_state_failed;
4196 else if (working >= 2)
4197 state = DDF_state_part_optimal;
4198 break;
4199 case DDF_RAID4:
4200 case DDF_RAID5:
4201 if (working < n_prim - 1)
4202 state = DDF_state_failed;
4203 break;
4204 case DDF_RAID6:
4205 if (working < n_prim - 2)
4206 state = DDF_state_failed;
4207 else if (working == n_prim - 1)
4208 state = DDF_state_part_optimal;
4209 break;
4210 }
4211 return state;
4212 }
4213
4214 static int secondary_state(int state, int other, int seclevel)
4215 {
4216 if (state == DDF_state_optimal && other == DDF_state_optimal)
4217 return DDF_state_optimal;
4218 if (seclevel == DDF_2MIRRORED) {
4219 if (state == DDF_state_optimal || other == DDF_state_optimal)
4220 return DDF_state_part_optimal;
4221 if (state == DDF_state_failed && other == DDF_state_failed)
4222 return DDF_state_failed;
4223 return DDF_state_degraded;
4224 } else {
4225 if (state == DDF_state_failed || other == DDF_state_failed)
4226 return DDF_state_failed;
4227 if (state == DDF_state_degraded || other == DDF_state_degraded)
4228 return DDF_state_degraded;
4229 return DDF_state_part_optimal;
4230 }
4231 }
4232
4233 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4234 {
4235 int state = get_bvd_state(ddf, &vcl->conf);
4236 unsigned int i;
4237 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4238 state = secondary_state(
4239 state,
4240 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4241 vcl->conf.srl);
4242 }
4243 return state;
4244 }
4245
4246 /*
4247 * The state of each disk is stored in the global phys_disk structure
4248 * in phys_disk.entries[n].state.
4249 * This makes various combinations awkward.
4250 * - When a device fails in any array, it must be failed in all arrays
4251 * that include a part of this device.
4252 * - When a component is rebuilding, we cannot include it officially in the
4253 * array unless this is the only array that uses the device.
4254 *
4255 * So: when transitioning:
4256 * Online -> failed, just set failed flag. monitor will propagate
4257 * spare -> online, the device might need to be added to the array.
4258 * spare -> failed, just set failed. Don't worry if in array or not.
4259 */
/* mdmon callback: record a state change (DS_FAULTY / DS_INSYNC bits in
 * 'state') for raid disk 'n' of array 'a' in the DDF phys_disk and
 * virtual_disk records, marking updates pending when anything changed.
 */
static void ddf_set_disk(struct active_array *a, int n, int state)
{
	struct ddf_super *ddf = a->container->sb;
	unsigned int inst = a->info.container_member, n_bvd;
	struct vcl *vcl;
	struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
					 &n_bvd, &vcl);
	int pd;
	struct mdinfo *mdi;
	struct dl *dl;

	dprintf("%s: %d to %x\n", __func__, n, state);
	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* Find the matching slot in 'info'. */
	for (mdi = a->info.devs; mdi; mdi = mdi->next)
		if (mdi->disk.raid_disk == n)
			break;
	if (!mdi) {
		pr_err("%s: cannot find raid disk %d\n",
		       __func__, n);
		return;
	}

	/* and find the 'dl' entry corresponding to that.
	 * Match by major/minor; mdi->state_fd >= 0 restricts the search
	 * to members that are actually active. */
	for (dl = ddf->dlist; dl; dl = dl->next)
		if (mdi->state_fd >= 0 &&
		    mdi->disk.major == dl->major &&
		    mdi->disk.minor == dl->minor)
			break;
	if (!dl) {
		pr_err("%s: cannot find raid disk %d (%d/%d)\n",
		       __func__, n,
		       mdi->disk.major, mdi->disk.minor);
		return;
	}

	pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
	if (pd < 0 || pd != dl->pdnum) {
		/* disk doesn't currently exist or has changed.
		 * If it is now in_sync, insert it. */
		dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
			__func__, dl->pdnum, dl->major, dl->minor,
			be32_to_cpu(dl->disk.refnum));
		dprintf("%s: array %u disk %u ref %08x pd %d\n",
			__func__, inst, n_bvd,
			be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
		if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
			/* Newly in-sync: wire this disk into the BVD and
			 * convert it from global spare to active VD member. */
			pd = dl->pdnum; /* FIXME: is this really correct ? */
			vc->phys_refnum[n_bvd] = dl->disk.refnum;
			LBA_OFFSET(ddf, vc)[n_bvd] =
				cpu_to_be64(mdi->data_offset);
			be16_clear(ddf->phys->entries[pd].type,
				   cpu_to_be16(DDF_Global_Spare));
			be16_set(ddf->phys->entries[pd].type,
				 cpu_to_be16(DDF_Active_in_VD));
			ddf_set_updates_pending(ddf);
		}
	} else {
		/* Disk already known: translate the DS_* bits into DDF
		 * phys-disk state flags; only flag updates if the state
		 * word actually changed. */
		be16 old = ddf->phys->entries[pd].state;
		if (state & DS_FAULTY)
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed));
		if (state & DS_INSYNC) {
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Online));
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Rebuilding));
		}
		if (!be16_eq(old, ddf->phys->entries[pd].state))
			ddf_set_updates_pending(ddf);
	}

	dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
		be32_to_cpu(dl->disk.refnum), state,
		be16_to_cpu(ddf->phys->entries[pd].state));

	/* Now we need to check the state of the array and update
	 * virtual_disk.entries[n].state.
	 * It needs to be one of "optimal", "degraded", "failed".
	 * I don't understand 'deleted' or 'missing'.
	 */
	state = get_svd_state(ddf, vcl);

	/* Replace only the DDF_state_mask bits of the virtual entry,
	 * preserving the other flag bits in the same byte. */
	if (ddf->virt->entries[inst].state !=
	    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
	     | state)) {

		ddf->virt->entries[inst].state =
			(ddf->virt->entries[inst].state & ~DDF_state_mask)
			| state;
		ddf_set_updates_pending(ddf);
	}

}
4357
4358 static void ddf_sync_metadata(struct supertype *st)
4359 {
4360
4361 /*
4362 * Write all data to all devices.
4363 * Later, we might be able to track whether only local changes
4364 * have been made, or whether any global data has been changed,
4365 * but ddf is sufficiently weird that it probably always
4366 * changes global data ....
4367 */
4368 struct ddf_super *ddf = st->sb;
4369 if (!ddf->updates_pending)
4370 return;
4371 ddf->updates_pending = 0;
4372 __write_init_super_ddf(st);
4373 dprintf("ddf: sync_metadata\n");
4374 }
4375
4376 static int del_from_conflist(struct vcl **list, const char *guid)
4377 {
4378 struct vcl **p;
4379 int found = 0;
4380 for (p = list; p && *p; p = &((*p)->next))
4381 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4382 found = 1;
4383 *p = (*p)->next;
4384 }
4385 return found;
4386 }
4387
4388 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4389 {
4390 struct dl *dl;
4391 unsigned int vdnum, i;
4392 vdnum = find_vde_by_guid(ddf, guid);
4393 if (vdnum == DDF_NOTFOUND) {
4394 pr_err("%s: could not find VD %s\n", __func__,
4395 guid_str(guid));
4396 return -1;
4397 }
4398 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4399 pr_err("%s: could not find conf %s\n", __func__,
4400 guid_str(guid));
4401 return -1;
4402 }
4403 for (dl = ddf->dlist; dl; dl = dl->next)
4404 for (i = 0; i < ddf->max_part; i++)
4405 if (dl->vlist[i] != NULL &&
4406 !memcmp(dl->vlist[i]->conf.guid, guid,
4407 DDF_GUID_LEN))
4408 dl->vlist[i] = NULL;
4409 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4410 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4411 return 0;
4412 }
4413
4414 static int kill_subarray_ddf(struct supertype *st)
4415 {
4416 struct ddf_super *ddf = st->sb;
4417 /*
4418 * currentconf is set in container_content_ddf,
4419 * called with subarray arg
4420 */
4421 struct vcl *victim = ddf->currentconf;
4422 struct vd_config *conf;
4423 ddf->currentconf = NULL;
4424 unsigned int vdnum;
4425 if (!victim) {
4426 pr_err("%s: nothing to kill\n", __func__);
4427 return -1;
4428 }
4429 conf = &victim->conf;
4430 vdnum = find_vde_by_guid(ddf, conf->guid);
4431 if (vdnum == DDF_NOTFOUND) {
4432 pr_err("%s: could not find VD %s\n", __func__,
4433 guid_str(conf->guid));
4434 return -1;
4435 }
4436 if (st->update_tail) {
4437 struct virtual_disk *vd;
4438 int len = sizeof(struct virtual_disk)
4439 + sizeof(struct virtual_entry);
4440 vd = xmalloc(len);
4441 if (vd == NULL) {
4442 pr_err("%s: failed to allocate %d bytes\n", __func__,
4443 len);
4444 return -1;
4445 }
4446 memset(vd, 0 , len);
4447 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4448 vd->populated_vdes = cpu_to_be16(0);
4449 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4450 /* we use DDF_state_deleted as marker */
4451 vd->entries[0].state = DDF_state_deleted;
4452 append_metadata_update(st, vd, len);
4453 } else {
4454 _kill_subarray_ddf(ddf, conf->guid);
4455 ddf_set_updates_pending(ddf);
4456 ddf_sync_metadata(st);
4457 }
4458 return 0;
4459 }
4460
/* Find, in 'update', the BVD record whose sec_elmnt_seq matches 'conf'
 * and copy its phys_refnum table (and the data that follows it) into
 * 'conf'.  Logs an error if no matching BVD is present in the update.
 */
static void copy_matching_bvd(struct ddf_super *ddf,
			      struct vd_config *conf,
			      const struct metadata_update *update)
{
	unsigned int mppe =
		be16_to_cpu(ddf->anchor.max_primary_element_entries);
	unsigned int len = ddf->conf_rec_len * 512;
	char *p;
	struct vd_config *vc;
	/* The update buffer holds sec_elmnt_count consecutive vd_config
	 * records, each conf_rec_len*512 bytes. */
	for (p = update->buf; p < update->buf + update->len; p += len) {
		vc = (struct vd_config *) p;
		if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
			/* Copies mppe u32 refnums plus mppe u64s - this
			 * assumes the LBA offset array immediately follows
			 * phys_refnum in the on-disk layout (see
			 * LBA_OFFSET usage elsewhere) - TODO confirm. */
			memcpy(conf->phys_refnum, vc->phys_refnum,
			       mppe * (sizeof(__u32) + sizeof(__u64)));
			return;
		}
	}
	pr_err("%s: no match for BVD %d of %s in update\n", __func__,
	       conf->sec_elmnt_seq, guid_str(conf->guid));
}
4481
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible updates are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {

		/* Exactly one phys_disk_entry per update is supported. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					/* hand the dl back via update->space
					 * so managemon can free it safely */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* Adding a disk: the slot must still be unused
		 * (all-0xff GUID). */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {

		/* Exactly one virtual_entry per update is supported. */
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		/* DDF_state_deleted is the marker set by
		 * kill_subarray_ddf() to request deletion. */
		if (vd->entries[0].state == DDF_state_deleted) {
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {

			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		/* The update must contain one full record per BVD. */
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF: the vcl memory was pre-allocated
			 * by ddf_prepare_update() (monitor must not malloc). */
			unsigned int i;
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			if (dl->vlist[0]) {
				/* Disk is used by at least one VD: it is no
				 * longer a global spare. */
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				/* Unused disk reverts to global spare. */
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++) {
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries down to index pd2. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the now-unused tail entries as free (all-0xff GUID). */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4784
4785 static void ddf_prepare_update(struct supertype *st,
4786 struct metadata_update *update)
4787 {
4788 /* This update arrived at managemon.
4789 * We are about to pass it to monitor.
4790 * If a malloc is needed, do it here.
4791 */
4792 struct ddf_super *ddf = st->sb;
4793 be32 *magic = (be32 *)update->buf;
4794 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4795 struct vcl *vcl;
4796 struct vd_config *conf = (struct vd_config *) update->buf;
4797 if (posix_memalign(&update->space, 512,
4798 offsetof(struct vcl, conf)
4799 + ddf->conf_rec_len * 512) != 0) {
4800 update->space = NULL;
4801 return;
4802 }
4803 vcl = update->space;
4804 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4805 if (alloc_other_bvds(ddf, vcl) != 0) {
4806 free(update->space);
4807 update->space = NULL;
4808 }
4809 }
4810 }
4811
4812 /*
4813 * Check degraded state of a RAID10.
4814 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4815 */
4816 static int raid10_degraded(struct mdinfo *info)
4817 {
4818 int n_prim, n_bvds;
4819 int i;
4820 struct mdinfo *d;
4821 char *found;
4822 int ret = -1;
4823
4824 n_prim = info->array.layout & ~0x100;
4825 n_bvds = info->array.raid_disks / n_prim;
4826 found = xmalloc(n_bvds);
4827 if (found == NULL)
4828 return ret;
4829 memset(found, 0, n_bvds);
4830 for (d = info->devs; d; d = d->next) {
4831 i = d->disk.raid_disk / n_prim;
4832 if (i >= n_bvds) {
4833 pr_err("%s: BUG: invalid raid disk\n", __func__);
4834 goto out;
4835 }
4836 if (d->state_fd > 0)
4837 found[i]++;
4838 }
4839 ret = 2;
4840 for (i = 0; i < n_bvds; i++)
4841 if (!found[i]) {
4842 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4843 ret = 0;
4844 goto out;
4845 } else if (found[i] < n_prim) {
4846 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4847 n_bvds);
4848 ret = 1;
4849 }
4850 out:
4851 free(found);
4852 return ret;
4853 }
4854
4855 /*
4856 * Check if the array 'a' is degraded but not failed.
4857 * If it is, find as many spares as are available and needed and
4858 * arrange for their inclusion.
 * We only choose devices which are not already in the array,
 * and prefer those with a spare assignment to this array;
 * otherwise we choose global spares - assuming always that
 * there is enough room.
4863 * For each spare that we assign, we return an 'mdinfo' which
4864 * describes the position for the device in the array.
4865 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4866 * the new phys_refnum and lba_offset values.
4867 *
4868 * Only worry about BVDs at the moment.
4869 */
4870 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4871 struct metadata_update **updates)
4872 {
4873 int working = 0;
4874 struct mdinfo *d;
4875 struct ddf_super *ddf = a->container->sb;
4876 int global_ok = 0;
4877 struct mdinfo *rv = NULL;
4878 struct mdinfo *di;
4879 struct metadata_update *mu;
4880 struct dl *dl;
4881 int i;
4882 unsigned int j;
4883 struct vcl *vcl;
4884 struct vd_config *vc;
4885 unsigned int n_bvd;
4886
4887 for (d = a->info.devs ; d ; d = d->next) {
4888 if ((d->curr_state & DS_FAULTY) &&
4889 d->state_fd >= 0)
4890 /* wait for Removal to happen */
4891 return NULL;
4892 if (d->state_fd >= 0)
4893 working ++;
4894 }
4895
4896 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4897 a->info.array.raid_disks,
4898 a->info.array.level);
4899 if (working == a->info.array.raid_disks)
4900 return NULL; /* array not degraded */
4901 switch (a->info.array.level) {
4902 case 1:
4903 if (working == 0)
4904 return NULL; /* failed */
4905 break;
4906 case 4:
4907 case 5:
4908 if (working < a->info.array.raid_disks - 1)
4909 return NULL; /* failed */
4910 break;
4911 case 6:
4912 if (working < a->info.array.raid_disks - 2)
4913 return NULL; /* failed */
4914 break;
4915 case 10:
4916 if (raid10_degraded(&a->info) < 1)
4917 return NULL;
4918 break;
4919 default: /* concat or stripe */
4920 return NULL; /* failed */
4921 }
4922
4923 /* For each slot, if it is not working, find a spare */
4924 dl = ddf->dlist;
4925 for (i = 0; i < a->info.array.raid_disks; i++) {
4926 for (d = a->info.devs ; d ; d = d->next)
4927 if (d->disk.raid_disk == i)
4928 break;
4929 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4930 if (d && (d->state_fd >= 0))
4931 continue;
4932
4933 /* OK, this device needs recovery. Find a spare */
4934 again:
4935 for ( ; dl ; dl = dl->next) {
4936 unsigned long long esize;
4937 unsigned long long pos;
4938 struct mdinfo *d2;
4939 int is_global = 0;
4940 int is_dedicated = 0;
4941 struct extent *ex;
4942 unsigned int j;
4943 be16 state = ddf->phys->entries[dl->pdnum].state;
4944 if (be16_and(state,
4945 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4946 !be16_and(state,
4947 cpu_to_be16(DDF_Online)))
4948 continue;
4949
4950 /* If in this array, skip */
4951 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4952 if (d2->state_fd >= 0 &&
4953 d2->disk.major == dl->major &&
4954 d2->disk.minor == dl->minor) {
4955 dprintf("%x:%x (%08x) already in array\n",
4956 dl->major, dl->minor,
4957 be32_to_cpu(dl->disk.refnum));
4958 break;
4959 }
4960 if (d2)
4961 continue;
4962 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4963 cpu_to_be16(DDF_Spare))) {
4964 /* Check spare assign record */
4965 if (dl->spare) {
4966 if (dl->spare->type & DDF_spare_dedicated) {
4967 /* check spare_ents for guid */
4968 for (j = 0 ;
4969 j < be16_to_cpu
4970 (dl->spare
4971 ->populated);
4972 j++) {
4973 if (memcmp(dl->spare->spare_ents[j].guid,
4974 ddf->virt->entries[a->info.container_member].guid,
4975 DDF_GUID_LEN) == 0)
4976 is_dedicated = 1;
4977 }
4978 } else
4979 is_global = 1;
4980 }
4981 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4982 cpu_to_be16(DDF_Global_Spare))) {
4983 is_global = 1;
4984 } else if (!be16_and(ddf->phys
4985 ->entries[dl->pdnum].state,
4986 cpu_to_be16(DDF_Failed))) {
4987 /* we can possibly use some of this */
4988 is_global = 1;
4989 }
4990 if ( ! (is_dedicated ||
4991 (is_global && global_ok))) {
4992 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4993 is_dedicated, is_global);
4994 continue;
4995 }
4996
4997 /* We are allowed to use this device - is there space?
4998 * We need a->info.component_size sectors */
4999 ex = get_extents(ddf, dl);
5000 if (!ex) {
5001 dprintf("cannot get extents\n");
5002 continue;
5003 }
5004 j = 0; pos = 0;
5005 esize = 0;
5006
5007 do {
5008 esize = ex[j].start - pos;
5009 if (esize >= a->info.component_size)
5010 break;
5011 pos = ex[j].start + ex[j].size;
5012 j++;
5013 } while (ex[j-1].size);
5014
5015 free(ex);
5016 if (esize < a->info.component_size) {
5017 dprintf("%x:%x has no room: %llu %llu\n",
5018 dl->major, dl->minor,
5019 esize, a->info.component_size);
5020 /* No room */
5021 continue;
5022 }
5023
5024 /* Cool, we have a device with some space at pos */
5025 di = xcalloc(1, sizeof(*di));
5026 di->disk.number = i;
5027 di->disk.raid_disk = i;
5028 di->disk.major = dl->major;
5029 di->disk.minor = dl->minor;
5030 di->disk.state = 0;
5031 di->recovery_start = 0;
5032 di->data_offset = pos;
5033 di->component_size = a->info.component_size;
5034 di->container_member = dl->pdnum;
5035 di->next = rv;
5036 rv = di;
5037 dprintf("%x:%x (%08x) to be %d at %llu\n",
5038 dl->major, dl->minor,
5039 be32_to_cpu(dl->disk.refnum), i, pos);
5040
5041 break;
5042 }
5043 if (!dl && ! global_ok) {
5044 /* not enough dedicated spares, try global */
5045 global_ok = 1;
5046 dl = ddf->dlist;
5047 goto again;
5048 }
5049 }
5050
5051 if (!rv)
5052 /* No spares found */
5053 return rv;
5054 /* Now 'rv' has a list of devices to return.
5055 * Create a metadata_update record to update the
5056 * phys_refnum and lba_offset values
5057 */
5058 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5059 &n_bvd, &vcl);
5060 if (vc == NULL)
5061 return NULL;
5062
5063 mu = xmalloc(sizeof(*mu));
5064 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5065 free(mu);
5066 mu = NULL;
5067 }
5068
5069 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5070 mu->buf = xmalloc(mu->len);
5071 mu->space = NULL;
5072 mu->space_list = NULL;
5073 mu->next = *updates;
5074 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5075 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5076 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5077 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5078
5079 vc = (struct vd_config*)mu->buf;
5080 for (di = rv ; di ; di = di->next) {
5081 unsigned int i_sec, i_prim;
5082 i_sec = di->disk.raid_disk
5083 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5084 i_prim = di->disk.raid_disk
5085 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5086 vc = (struct vd_config *)(mu->buf
5087 + i_sec * ddf->conf_rec_len * 512);
5088 for (dl = ddf->dlist; dl; dl = dl->next)
5089 if (dl->major == di->disk.major
5090 && dl->minor == di->disk.minor)
5091 break;
5092 if (!dl) {
5093 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5094 __func__, di->disk.raid_disk,
5095 di->disk.major, di->disk.minor);
5096 return NULL;
5097 }
5098 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5099 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5100 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5101 be32_to_cpu(vc->phys_refnum[i_prim]),
5102 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5103 }
5104 *updates = mu;
5105 return rv;
5106 }
5107 #endif /* MDASSEMBLE */
5108
5109 static int ddf_level_to_layout(int level)
5110 {
5111 switch(level) {
5112 case 0:
5113 case 1:
5114 return 0;
5115 case 5:
5116 return ALGORITHM_LEFT_SYMMETRIC;
5117 case 6:
5118 return ALGORITHM_ROTATING_N_CONTINUE;
5119 case 10:
5120 return 0x102;
5121 default:
5122 return UnSet;
5123 }
5124 }
5125
5126 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5127 {
5128 if (level && *level == UnSet)
5129 *level = LEVEL_CONTAINER;
5130
5131 if (level && layout && *layout == UnSet)
5132 *layout = ddf_level_to_layout(*level);
5133 }
5134
/* Method table wiring DDF metadata support into mdadm and mdmon. */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* user-facing operations (not built into mdassemble) */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* DDF is external (container) metadata managed by mdmon */
	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new       = ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk       = ddf_set_disk,
	.sync_metadata  = ddf_sync_metadata,
	.process_update = ddf_process_update,
	.prepare_update = ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};